tech-kern archive
Layered fs, vnode locking and v_vnlock removal
With our current vnode lock implementation, VOP_LOCK() and VOP_UNLOCK()
are not symmetric: a vnode may be locked from one file system and
unlocked from another one. Is there any reason left to have layered
file systems share the vnode lock with lower file systems via v_vnlock?
The attached patch removes v_vnlock and changes layered file systems
to always pass the locking VOPs down to the leaf file system.
Comments or objections?
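
To illustrate the effect (this is not part of the patch, and the caller
below is a made-up example): with v_vnlock gone, a lock request on a
layer vnode is bypassed all the way down to the leaf file system, which
locks its own v_lock in genfs_lock(); the unlock takes the same path,
so VOP_LOCK() and VOP_UNLOCK() always act on the same lock.

/*
 * Hypothetical caller, for illustration only -- not part of the patch.
 * After the change, locking a nullfs/umapfs vnode goes
 *   vn_lock() -> layer_lock() -> LAYERFS_DO_BYPASS -> VOP_LOCK(lowervp)
 * and ends in the leaf's genfs_lock(), which takes lowervp->v_lock.
 * VOP_UNLOCK() follows the same path and releases that same lock.
 */
static int
example_with_locked_vnode(struct vnode *layer_vp)
{
	int error;

	error = vn_lock(layer_vp, LK_EXCLUSIVE | LK_RETRY);
	if (error)
		return error;

	/* ... operate on the locked vnode stack ... */

	VOP_UNLOCK(layer_vp, 0);
	return 0;
}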
--
Juergen Hannken-Illjes - hannken%eis.cs.tu-bs.de@localhost - TU Braunschweig
(Germany)
Index: sys/miscfs/genfs/layer_vnops.c
===================================================================
RCS file: /cvsroot/src/sys/miscfs/genfs/layer_vnops.c,v
retrieving revision 1.39
diff -p -u -4 -r1.39 layer_vnops.c
--- sys/miscfs/genfs/layer_vnops.c 8 Jan 2010 11:35:10 -0000 1.39
+++ sys/miscfs/genfs/layer_vnops.c 23 May 2010 13:56:55 -0000
@@ -253,12 +253,9 @@ __KERNEL_RCSID(0, "$NetBSD: layer_vnops.
/*
* This is the 08-June-99 bypass routine, based on the 10-Apr-92 bypass
* routine by John Heidemann.
* The new element for this version is that the whole nullfs
- * system gained the concept of locks on the lower node, and locks on
- * our nodes. When returning from a call to the lower layer, we may
- * need to update lock state ONLY on our layer. The LAYERFS_UPPER*LOCK()
- * macros provide this functionality.
+ * system gained the concept of locks on the lower node.
* The 10-Apr-92 version was optimized for speed, throwing away some
* safety checks. It should still always work, but it's not as
* robust to programmer errors.
*
@@ -289,9 +286,9 @@ layer_bypass(void *v)
<other random data follows, presumably>
} */ *ap = v;
int (**our_vnodeop_p)(void *);
struct vnode **this_vp_p;
- int error, error1;
+ int error;
struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
struct vnode **vps_p[VDESC_MAX_VPS];
struct vnode ***vppp;
struct mount *mp;
@@ -367,10 +364,8 @@ layer_bypass(void *v)
if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
break; /* bail out at end of list */
if (old_vps[i]) {
*(vps_p[i]) = old_vps[i];
- if (reles & VDESC_VP0_WILLUNLOCK)
- LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
if (reles & VDESC_VP0_WILLRELE)
vrele(*(vps_p[i]));
}
}
@@ -587,11 +582,10 @@ layer_open(void *v)
return LAYERFS_DO_BYPASS(vp, ap);
}
/*
- * We need to process our own vnode lock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
+ * We need to clear the interlock flag as it applies only to our vnode,
+ * not the vnodes below us on the stack.
*/
int
layer_lock(void *v)
{
@@ -599,50 +593,18 @@ layer_lock(void *v)
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap = v;
- struct vnode *vp = ap->a_vp, *lowervp;
- int flags = ap->a_flags, error;
+ struct vnode *vp = ap->a_vp;
- if (flags & LK_INTERLOCK) {
+ if (ap->a_flags & LK_INTERLOCK) {
mutex_exit(&vp->v_interlock);
- flags &= ~LK_INTERLOCK;
+ ap->a_flags &= ~LK_INTERLOCK;
}
- if (vp->v_vnlock != NULL) {
- /*
- * The lower level has exported a struct lock to us. Use
- * it so that all vnodes in the stack lock and unlock
- * simultaneously. Note: we don't DRAIN the lock as DRAIN
- * decommissions the lock - just because our vnode is
- * going away doesn't mean the struct lock below us is.
- * LK_EXCLUSIVE is fine.
- */
- return (vlockmgr(vp->v_vnlock, flags));
- } else {
- /*
- * Ahh well. It would be nice if the fs we're over would
- * export a struct lock for us to use, but it doesn't.
- *
- * To prevent race conditions involving doing a lookup
- * on "..", we have to lock the lower node, then lock our
- * node. Most of the time it won't matter that we lock our
- * node (as any locking would need the lower one locked
- * first).
- */
- lowervp = LAYERVPTOLOWERVP(vp);
- error = VOP_LOCK(lowervp, flags);
- if (error)
- return (error);
- if ((error = vlockmgr(&vp->v_lock, flags))) {
- VOP_UNLOCK(lowervp, 0);
- }
- return (error);
- }
+ return LAYERFS_DO_BYPASS(vp, ap);
}
-/*
- */
int
layer_unlock(void *v)
{
struct vop_unlock_args /* {
@@ -650,21 +612,10 @@ layer_unlock(void *v)
int a_flags;
struct proc *a_p;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
- int flags = ap->a_flags;
-
- if (flags & LK_INTERLOCK) {
- mutex_exit(&vp->v_interlock);
- flags &= ~LK_INTERLOCK;
- }
- if (vp->v_vnlock != NULL) {
- return (vlockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE));
- } else {
- VOP_UNLOCK(LAYERVPTOLOWERVP(vp), flags);
- return (vlockmgr(&vp->v_lock, flags | LK_RELEASE));
- }
+ return LAYERFS_DO_BYPASS(vp, ap);
}
int
layer_islocked(void *v)
@@ -672,18 +623,10 @@ layer_islocked(void *v)
struct vop_islocked_args /* {
struct vnode *a_vp;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
- int lkstatus;
-
- if (vp->v_vnlock != NULL)
- return vlockstatus(vp->v_vnlock);
- lkstatus = VOP_ISLOCKED(LAYERVPTOLOWERVP(vp));
- if (lkstatus)
- return lkstatus;
-
- return vlockstatus(&vp->v_lock);
+ return LAYERFS_DO_BYPASS(vp, ap);
}
/*
* If vinvalbuf is calling us, it's a "shallow fsync" -- don't bother
Index: sys/miscfs/umapfs/umap_vnops.c
===================================================================
RCS file: /cvsroot/src/sys/miscfs/umapfs/umap_vnops.c,v
retrieving revision 1.48
diff -p -u -4 -r1.48 umap_vnops.c
--- sys/miscfs/umapfs/umap_vnops.c 8 Jan 2010 11:35:11 -0000 1.48
+++ sys/miscfs/umapfs/umap_vnops.c 23 May 2010 13:56:57 -0000
@@ -123,9 +123,9 @@ umap_bypass(void *v)
kauth_cred_t *credpp = NULL, credp = 0;
kauth_cred_t savecredp = 0, savecompcredp = 0;
kauth_cred_t compcredp = 0;
struct vnode **this_vp_p;
- int error, error1;
+ int error;
struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
struct vnode **vps_p[VDESC_MAX_VPS];
struct vnode ***vppp;
struct vnodeop_desc *descp = ap->a_desc;
@@ -260,10 +260,8 @@ umap_bypass(void *v)
if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
break; /* bail out at end of list */
if (old_vps[i]) {
*(vps_p[i]) = old_vps[i];
- if (reles & VDESC_VP0_WILLUNLOCK)
- LAYERFS_UPPERUNLOCK(*(vps_p[i]), 0, error1);
if (reles & VDESC_VP0_WILLRELE)
vrele(*(vps_p[i]));
}
}
Index: sys/miscfs/genfs/layer.h
===================================================================
RCS file: /cvsroot/src/sys/miscfs/genfs/layer.h,v
retrieving revision 1.13
diff -p -u -4 -r1.13 layer.h
--- sys/miscfs/genfs/layer.h 30 Jan 2008 09:50:23 -0000 1.13
+++ sys/miscfs/genfs/layer.h 23 May 2010 13:56:55 -0000
@@ -116,35 +116,8 @@ struct layer_node {
#define LAYERFS_RESFLAGS 0x00000fff /* flags reserved for layerfs */
#define LAYERFS_REMOVED 0x00000001 /* Did a remove on this node */
-/*
- * The following macros handle upperfs-specific locking. They are needed
- * when the lowerfs does not export a struct lock for locking use by the
- * upper layers. These macros are inteded for adjusting the upperfs
- * struct lock to reflect changes in the underlying vnode's lock state.
- */
-#define LAYERFS_UPPERLOCK(v, f, r) do { \
- if ((v)->v_vnlock == NULL) \
- r = vlockmgr(&(v)->v_lock, (f)); \
- else \
- r = 0; \
- } while (0)
-
-#define LAYERFS_UPPERUNLOCK(v, f, r) do { \
- if ((v)->v_vnlock == NULL) \
- r = vlockmgr(&(v)->v_lock, (f) | LK_RELEASE); \
- else \
- r = 0; \
- } while (0)
-
-#define LAYERFS_UPPERISLOCKED(v, r) do { \
- if ((v)->v_vnlock == NULL) \
- r = vlockstatus(&(v)->v_lock); \
- else \
- r = -1; \
- } while (0)
-
#define LAYERFS_DO_BYPASS(vp, ap) \
(*MOUNTTOLAYERMOUNT((vp)->v_mount)->layerm_bypass)((ap))
struct vnode *layer_checkvp(struct vnode *vp, const char *fil, int lno);
Index: sys/miscfs/genfs/layer_subr.c
===================================================================
RCS file: /cvsroot/src/sys/miscfs/genfs/layer_subr.c,v
retrieving revision 1.28
diff -p -u -4 -r1.28 layer_subr.c
--- sys/miscfs/genfs/layer_subr.c 8 Jan 2010 11:35:10 -0000 1.28
+++ sys/miscfs/genfs/layer_subr.c 23 May 2010 13:56:55 -0000
@@ -169,20 +169,16 @@ loop:
/*
* We must not let vget() try to lock the layer
* vp, since the lower vp is already locked and
* locking the layer vp will involve locking
- * the lower vp (whether or not they actually
- * share a lock). Instead, take the layer vp's
- * lock separately afterward, but only if it
- * does not share the lower vp's lock.
+ * the lower vp.
*/
error = vget(vp, LK_INTERLOCK | LK_NOWAIT);
if (error) {
kpause("layerfs", false, 1, NULL);
mutex_enter(&lmp->layerm_hashlock);
goto loop;
}
- LAYERFS_UPPERLOCK(vp, LK_EXCLUSIVE, error);
return (vp);
}
}
return NULL;
@@ -248,23 +244,8 @@ layer_node_alloc(struct mount *mp, struc
return (0);
}
/*
- * Now lock the new node. We rely on the fact that we were passed
- * a locked vnode. If the lower node is exporting a struct lock
- * (v_vnlock != NULL) then we just set the upper v_vnlock to the
- * lower one, and both are now locked. If the lower node is exporting
- * NULL, then we copy that up and manually lock the upper node.
- *
- * LAYERFS_UPPERLOCK already has the test, so we use it after copying
- * up the v_vnlock from below.
- */
-
- vp->v_vnlock = lowervp->v_vnlock;
- LAYERFS_UPPERLOCK(vp, LK_EXCLUSIVE, error);
- KASSERT(error == 0);
-
- /*
* Insert the new node into the hash.
* Add a reference to the lower node.
*/
Index: sys/fs/unionfs/unionfs_subr.c
===================================================================
RCS file: /cvsroot/src/sys/fs/unionfs/unionfs_subr.c,v
retrieving revision 1.5
diff -p -u -4 -r1.5 unionfs_subr.c
--- sys/fs/unionfs/unionfs_subr.c 8 Jan 2010 11:35:09 -0000 1.5
+++ sys/fs/unionfs/unionfs_subr.c 23 May 2010 13:56:54 -0000
@@ -109,12 +109,8 @@ unionfs_nodeget(struct mount *mp, struct
unp->un_vnode = vp;
unp->un_uppervp = uppervp;
unp->un_lowervp = lowervp;
unp->un_dvp = dvp;
- if (uppervp != NULLVP)
- vp->v_vnlock = uppervp->v_vnlock;
- else
- vp->v_vnlock = lowervp->v_vnlock;
if (path != NULL) {
unp->un_path = (char *)
malloc(cnp->cn_namelen +1, M_UNIONFSPATH, M_WAITOK|M_ZERO);
@@ -155,9 +151,8 @@ unionfs_noderem(struct vnode *vp)
unp = VTOUNIONFS(vp);
lvp = unp->un_lowervp;
uvp = unp->un_uppervp;
unp->un_lowervp = unp->un_uppervp = NULLVP;
- vp->v_vnlock = &(vp->v_lock);
vp->v_data = NULL;
if (lvp != NULLVP)
vrele(lvp);
@@ -489,11 +484,10 @@ unionfs_node_update(struct unionfs_node
* lock update
*/
mutex_enter(&vp->v_interlock);
unp->un_uppervp = uvp;
- vp->v_vnlock = uvp->v_vnlock;
- lockcnt = lvp->v_vnlock->vl_recursecnt +
- rw_write_held(&lvp->v_vnlock->vl_lock);
+ lockcnt = lvp->v_lock.vl_recursecnt +
+ rw_write_held(&lvp->v_lock.vl_lock);
if (lockcnt <= 0)
panic("unionfs: no exclusive lock");
mutex_exit(&vp->v_interlock);
for (count = 1; count < lockcnt; count++)
Index: sys/fs/union/union_subr.c
===================================================================
RCS file: /cvsroot/src/sys/fs/union/union_subr.c,v
retrieving revision 1.35
diff -p -u -4 -r1.35 union_subr.c
--- sys/fs/union/union_subr.c 8 Jan 2010 11:35:09 -0000 1.35
+++ sys/fs/union/union_subr.c 23 May 2010 13:56:54 -0000
@@ -531,9 +531,8 @@ loop:
(*vpp)->v_data = malloc(sizeof(struct union_node), M_TEMP, M_WAITOK);
(*vpp)->v_vflag |= vflag;
(*vpp)->v_iflag |= iflag;
- (*vpp)->v_vnlock = NULL; /* Make upper layers call VOP_LOCK */
if (uppervp)
(*vpp)->v_type = uppervp->v_type;
else
(*vpp)->v_type = lowervp->v_type;
Index: sys/miscfs/genfs/genfs_vnops.c
===================================================================
RCS file: /cvsroot/src/sys/miscfs/genfs/genfs_vnops.c,v
retrieving revision 1.177
diff -p -u -4 -r1.177 genfs_vnops.c
--- sys/miscfs/genfs/genfs_vnops.c 8 Apr 2010 15:56:26 -0000 1.177
+++ sys/miscfs/genfs/genfs_vnops.c 23 May 2010 13:56:55 -0000
@@ -293,9 +293,9 @@ genfs_lock(void *v)
flags &= ~LK_INTERLOCK;
mutex_exit(&vp->v_interlock);
}
- return (vlockmgr(vp->v_vnlock, flags));
+ return (vlockmgr(&vp->v_lock, flags));
}
/*
* Unlock the node.
@@ -310,9 +310,9 @@ genfs_unlock(void *v)
struct vnode *vp = ap->a_vp;
KASSERT(ap->a_flags == 0);
- return (vlockmgr(vp->v_vnlock, LK_RELEASE));
+ return (vlockmgr(&vp->v_lock, LK_RELEASE));
}
/*
* Return whether or not the node is locked.
@@ -324,9 +324,9 @@ genfs_islocked(void *v)
struct vnode *a_vp;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
- return (vlockstatus(vp->v_vnlock));
+ return (vlockstatus(&vp->v_lock));
}
/*
* Stubs to use when there is no locking to be done on the underlying object.
Index: sys/fs/udf/udf_subr.c
===================================================================
RCS file: /cvsroot/src/sys/fs/udf/udf_subr.c,v
retrieving revision 1.104
diff -p -u -4 -r1.104 udf_subr.c
--- sys/fs/udf/udf_subr.c 25 Feb 2010 16:15:57 -0000 1.104
+++ sys/fs/udf/udf_subr.c 23 May 2010 13:56:54 -0000
@@ -5451,9 +5451,9 @@ udf_get_node(struct udf_mount *ump, stru
DPRINTF(NODE, ("\tnode fe/efe failed!\n"));
/* recycle udf_node */
udf_dispose_node(udf_node);
- vlockmgr(nvp->v_vnlock, LK_RELEASE);
+ vlockmgr(&nvp->v_lock, LK_RELEASE);
nvp->v_data = NULL;
ungetnewvnode(nvp);
return EINVAL; /* error code ok? */
@@ -5547,9 +5547,9 @@ udf_get_node(struct udf_mount *ump, stru
if (error) {
/* recycle udf_node */
udf_dispose_node(udf_node);
- vlockmgr(nvp->v_vnlock, LK_RELEASE);
+ vlockmgr(&nvp->v_lock, LK_RELEASE);
nvp->v_data = NULL;
ungetnewvnode(nvp);
return EINVAL; /* error code ok? */
@@ -5882,9 +5882,9 @@ udf_create_node_raw(struct vnode *dvp, s
error_out_unreserve:
udf_do_unreserve_space(ump, NULL, vpart_num, 1);
error_out_unlock:
- vlockmgr(nvp->v_vnlock, LK_RELEASE);
+ vlockmgr(&nvp->v_lock, LK_RELEASE);
error_out_unget:
nvp->v_data = NULL;
ungetnewvnode(nvp);
Index: sys/kern/vfs_subr.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_subr.c,v
retrieving revision 1.400
diff -p -u -4 -r1.400 vfs_subr.c
--- sys/kern/vfs_subr.c 30 Apr 2010 10:03:13 -0000 1.400
+++ sys/kern/vfs_subr.c 23 May 2010 13:56:55 -0000
@@ -647,9 +647,8 @@ getnewvnode(enum vtagtype tag, struct mo
KASSERT(LIST_EMPTY(&vp->v_nclist));
KASSERT(LIST_EMPTY(&vp->v_dnclist));
vp->v_type = VNON;
- vp->v_vnlock = &vp->v_lock;
vp->v_tag = tag;
vp->v_op = vops;
insmntque(vp, mp);
*vpp = vp;
@@ -1944,9 +1943,8 @@ vclean(vnode_t *vp, int flags)
/* Done with purge, notify sleepers of the grim news. */
mutex_enter(&vp->v_interlock);
vp->v_op = dead_vnodeop_p;
vp->v_tag = VT_NON;
- vp->v_vnlock = &vp->v_lock;
KNOTE(&vp->v_klist, NOTE_REVOKE);
vp->v_iflag &= ~(VI_XLOCK | VI_FREEING);
vp->v_vflag &= ~VV_LOCKSWORK;
if ((flags & DOCLOSE) != 0) {
@@ -2720,9 +2718,9 @@ vprint(const char *label, struct vnode *
struct vnlock *vl;
char bf[96];
int flag;
- vl = (vp->v_vnlock != NULL ? vp->v_vnlock : &vp->v_lock);
+ vl = &vp->v_lock;
flag = vp->v_iflag | vp->v_vflag | vp->v_uflag;
snprintb(bf, sizeof(bf), vnode_flagbits, flag);
if (label != NULL)
@@ -3288,9 +3286,9 @@ vfs_vnode_print(struct vnode *vp, int fu
ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
vp->v_mount, vp->v_mountedhere);
- (*pr)("v_lock %p v_vnlock %p\n", &vp->v_lock, vp->v_vnlock);
+ (*pr)("v_lock %p\n", &vp->v_lock);
if (full) {
struct buf *bp;
Index: sys/kern/vfs_vnops.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_vnops.c,v
retrieving revision 1.171
diff -p -u -4 -r1.171 vfs_vnops.c
--- sys/kern/vfs_vnops.c 23 Apr 2010 15:38:46 -0000 1.171
+++ sys/kern/vfs_vnops.c 23 May 2010 13:56:55 -0000
@@ -826,12 +826,10 @@ vn_closefile(file_t *fp)
*/
u_int
vn_setrecurse(struct vnode *vp)
{
- struct vnlock *lkp;
- lkp = (vp->v_vnlock != NULL ? vp->v_vnlock : &vp->v_lock);
- atomic_inc_uint(&lkp->vl_canrecurse);
+ atomic_inc_uint(&vp->v_lock.vl_canrecurse);
return 0;
}
@@ -840,12 +838,10 @@ vn_setrecurse(struct vnode *vp)
*/
void
vn_restorerecurse(struct vnode *vp, u_int flags)
{
- struct vnlock *lkp;
- lkp = (vp->v_vnlock != NULL ? vp->v_vnlock : &vp->v_lock);
- atomic_dec_uint(&lkp->vl_canrecurse);
+ atomic_dec_uint(&vp->v_lock.vl_canrecurse);
}
/*
* Simplified in-kernel wrapper calls for extended attribute access.
Index: sys/sys/param.h
===================================================================
RCS file: /cvsroot/src/sys/sys/param.h,v
retrieving revision 1.364
diff -p -u -4 -r1.364 param.h
--- sys/sys/param.h 2 May 2010 05:31:47 -0000 1.364
+++ sys/sys/param.h 23 May 2010 13:56:59 -0000
@@ -62,9 +62,9 @@
* NetBSD-2.0H (200080000) was changed on 20041001 to:
* 2.99.9 (299000900)
*/
-#define __NetBSD_Version__ 599002900 /* NetBSD 5.99.29 */
+#define __NetBSD_Version__ 599003000 /* NetBSD 5.99.30 */
#define __NetBSD_Prereq__(M,m,p) (((((M) * 100000000) + \
(m) * 1000000) + (p) * 100) <= __NetBSD_Version__)
Index: sys/sys/vnode.h
===================================================================
RCS file: /cvsroot/src/sys/sys/vnode.h,v
retrieving revision 1.218
diff -p -u -4 -r1.218 vnode.h
--- sys/sys/vnode.h 30 Apr 2010 10:03:14 -0000 1.218
+++ sys/sys/vnode.h 23 May 2010 13:57:02 -0000
@@ -138,9 +138,9 @@ struct vnlock {
* m mntvnode_lock
* n namecache_lock
* s syncer_data_lock
* u locked by underlying filesystem
- * v v_vnlock
+ * v v_lock
* x v_interlock + bufcache_lock to modify, either to inspect
*
* Each underlying filesystem allocates its own private area and hangs
* it from v_data.
@@ -176,9 +176,8 @@ struct vnode {
} v_un;
enum vtype v_type; /* :: vnode type */
enum vtagtype v_tag; /* :: type of underlying data */
struct vnlock v_lock; /* v: lock for this vnode */
- struct vnlock *v_vnlock; /* v: pointer to lock */
void *v_data; /* :: private data for fs */
struct klist v_klist; /* i: notes attached to vnode */
};
#define v_usecount v_uobj.uo_refs
@@ -192,22 +191,15 @@ struct vnode {
typedef struct vnodelst vnodelst_t;
typedef struct vnode vnode_t;
/*
- * All vnode locking operations should use vp->v_vnlock. For leaf filesystems
- * (such as ffs, lfs, msdosfs, etc), vp->v_vnlock = &vp->v_lock. For
- * stacked filesystems, vp->v_vnlock may equal lowervp->v_vnlock.
- *
- * vp->v_vnlock may also be NULL, which indicates that a leaf node does not
- * export a struct lock for vnode locking. Stacked filesystems (such as
- * nullfs) must call the underlying fs for locking. See layerfs_ routines
- * for examples.
+ * All vnode locking operations should use vp->v_lock.
*
* All filesystems must (pretend to) understand lockmanager flags.
*/
/*
- * Vnode flags. The first set are locked by vp->v_vnlock or are stable.
+ * Vnode flags. The first set are locked by vp->v_lock or are stable.
* VSYSTEM is only used to skip vflush()ing quota files. VISTTY is used
* when reading dead vnodes.
*/
#define VV_ROOT 0x00000001 /* root of its file system */
Index: share/man/man9/vnode.9
===================================================================
RCS file: /cvsroot/src/share/man/man9/vnode.9,v
retrieving revision 1.47
diff -p -u -4 -r1.47 vnode.9
--- share/man/man9/vnode.9 21 Feb 2010 13:33:03 -0000 1.47
+++ share/man/man9/vnode.9 23 May 2010 13:56:20 -0000
@@ -26,9 +26,9 @@
.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
-.Dd February 11, 2010
+.Dd May 22, 2010
.Dt VNODE 9
.Os
.Sh NAME
.Nm vnode ,
@@ -160,9 +160,8 @@ struct vnode {
struct nqlease *v_lease; /* Soft ref to lease */
enum vtype v_type; /* vnode type */
enum vtagtype v_tag; /* underlying data type */
struct lock v_lock; /* lock for this vnode */
- struct lock *v_vnlock; /* ptr to vnode lock */
void *v_data; /* private data for fs */
struct klist v_klist; /* knotes attached to vnode */
};
.Ed
@@ -433,9 +432,9 @@ universal disk format file system
systemV boot file system
.El
.Pp
All vnode locking operations use
-.Em v_vnlock .
+.Em v_lock .
This lock is acquired by calling
.Xr vn_lock 9
and released by calling
.Xr VOP_UNLOCK 9 .
@@ -461,33 +460,16 @@ locking.
Multiple-reader locking functions equivalently only in the presence
of big-lock SMP locking or a uni-processor machine.
The lock may be held while sleeping.
While the
-.Em v_vnlock
+.Em v_lock
is acquired, the holder is guaranteed that the vnode will not be
reclaimed or invalidated.
Most file system functions require that you hold the vnode lock on entry.
See
.Xr lock 9
for details on the kernel locking API.
.Pp
-For leaf file systems (such as ffs, lfs, msdosfs, etc),
-.Em v_vnlock
-will point to
-.Em v_lock .
-For stacked file systems,
-.Em v_vnlock
-will generally point to
-.Em v_vlock
-of the lowest file system.
-Additionally, the implementation of the vnode lock is the
-responsibility of the individual file systems and
-.Em v_vnlock
-may also be NULL indicating that a leaf node does not export a lock
-for vnode locking.
-In this case, stacked file systems (such as nullfs) must call the
-underlying file system directly for locking.
-.Pp
Each file system underlying a vnode allocates its own private area and
hangs it from
.Em v_data .
.Pp