Source-Changes-HG archive


[src/trunk]: src/sys/kern Take the vnode lock before the vnode is marked VI_C...



details:   https://anonhg.NetBSD.org/src/rev/55808d7b354f
branches:  trunk
changeset: 341574:55808d7b354f
user:      hannken <hannken@NetBSD.org>
date:      Thu Nov 12 11:35:42 2015 +0000

description:
Take the vnode lock before the vnode is marked VI_CHANGING and fed
to vclean().  Prevents a deadlock with two null mounts on the same
physical mount where one thread tries to vclean() a layer node and
another thread tries to vget() a layer node pointing to the same
physical node.

Fixes PR kern/50375: layerfs (nullfs) locking problem leading to livelock.
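
In essence, the change makes the cleaning side take the (possibly
layer-shared) vnode lock before it advertises that the vnode is being
changed, so a thread that already holds that lock can no longer end up
waiting on VI_CHANGING while the cleaner waits on the lock.  The
following is a minimal userland sketch of that ordering using pthreads;
struct obj, cleaner() and getter() are invented for illustration and are
not NetBSD kernel interfaces.

/*
 * Minimal userland sketch (pthreads) of the ordering this change
 * establishes: the cleaning thread takes the long-term object lock
 * *before* publishing its "being cleaned" flag.  The names only mirror
 * the roles of the vnode lock, v_interlock and VI_CHANGING/VI_XLOCK.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        pthread_mutex_t lock;       /* long-term lock, akin to the vnode lock */
        pthread_mutex_t interlock;  /* short-term lock, akin to v_interlock */
        bool            changing;   /* akin to VI_CHANGING / VI_XLOCK */
        int             data;
};

static struct obj o = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .interlock = PTHREAD_MUTEX_INITIALIZER,
        .changing = false,
        .data = 42,
};

/*
 * Cleaner: new order -- take the lock first, then mark the object as
 * changing.  The deadlock-prone order was the reverse: set the flag,
 * drop the interlock, then sleep for a lock that a waiter of the flag
 * may already hold.
 */
static void *
cleaner(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&o.lock);
        pthread_mutex_lock(&o.interlock);
        o.changing = true;
        pthread_mutex_unlock(&o.interlock);

        o.data = 0;                     /* "clean" the object */

        pthread_mutex_lock(&o.interlock);
        o.changing = false;
        pthread_mutex_unlock(&o.interlock);
        pthread_mutex_unlock(&o.lock);
        return NULL;
}

/*
 * Getter: because the flag is only ever set with o.lock held, a thread
 * holding o.lock can never observe it set -- mirroring what the new
 * KASSERT on VI_XLOCK in the first hunk asserts for the vnode lock.
 */
static void *
getter(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&o.lock);
        pthread_mutex_lock(&o.interlock);
        assert(!o.changing);
        printf("data = %d\n", o.data);
        pthread_mutex_unlock(&o.interlock);
        pthread_mutex_unlock(&o.lock);
        return NULL;
}

int
main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, cleaner, NULL);
        pthread_create(&t2, NULL, getter, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

Compile with cc -pthread; the assert in getter() documents why a lock
holder no longer needs to sleep on the flag in this ordering.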

diffstat:

 sys/kern/vfs_vnode.c |  46 +++++++++++++++++++++++++++-------------------
 1 files changed, 27 insertions(+), 19 deletions(-)

diffs (131 lines):

diff -r c4e1b6aebeb7 -r 55808d7b354f sys/kern/vfs_vnode.c
--- a/sys/kern/vfs_vnode.c      Thu Nov 12 10:49:35 2015 +0000
+++ b/sys/kern/vfs_vnode.c      Thu Nov 12 11:35:42 2015 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vfs_vnode.c,v 1.45 2015/07/12 08:11:28 hannken Exp $   */
+/*     $NetBSD: vfs_vnode.c,v 1.46 2015/11/12 11:35:42 hannken Exp $   */
 
 /*-
  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
@@ -116,7 +116,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.45 2015/07/12 08:11:28 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.46 2015/11/12 11:35:42 hannken Exp $");
 
 #define _VFS_VNODE_PRIVATE
 
@@ -325,15 +325,17 @@
                KASSERT((vp->v_iflag & VI_CLEAN) == 0);
                KASSERT(vp->v_freelisthd == listhd);
 
-               if (!mutex_tryenter(vp->v_interlock))
+               if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
                        continue;
-               if ((vp->v_iflag & VI_XLOCK) != 0) {
-                       mutex_exit(vp->v_interlock);
+               if (!mutex_tryenter(vp->v_interlock)) {
+                       VOP_UNLOCK(vp);
                        continue;
                }
+               KASSERT((vp->v_iflag & VI_XLOCK) == 0);
                mp = vp->v_mount;
                if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
                        mutex_exit(vp->v_interlock);
+                       VOP_UNLOCK(vp);
                        continue;
                }
                break;
@@ -643,6 +645,11 @@
                 * Note that VOP_INACTIVE() will drop the vnode lock.
                 */
                VOP_INACTIVE(vp, &recycle);
+               if (recycle) {
+                       /* vclean() below will drop the lock. */
+                       if (vn_lock(vp, LK_EXCLUSIVE) != 0)
+                               recycle = false;
+               }
                mutex_enter(vp->v_interlock);
                if (!recycle) {
                        if (vtryrele(vp)) {
@@ -867,6 +874,7 @@
 /*
  * Disassociate the underlying file system from a vnode.
  *
+ * Must be called with vnode locked and will return unlocked.
  * Must be called with the interlock held, and will return with it held.
  */
 static void
@@ -876,26 +884,18 @@
        bool recycle, active;
        int error;
 
+       KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
+           VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
        KASSERT(mutex_owned(vp->v_interlock));
        KASSERT((vp->v_iflag & VI_MARKER) == 0);
+       KASSERT((vp->v_iflag & (VI_XLOCK | VI_CLEAN)) == 0);
        KASSERT(vp->v_usecount != 0);
 
-       /* If already clean, nothing to do. */
-       if ((vp->v_iflag & VI_CLEAN) != 0) {
-               return;
-       }
-
        active = (vp->v_usecount > 1);
-       mutex_exit(vp->v_interlock);
-
-       vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-
        /*
         * Prevent the vnode from being recycled or brought into use
         * while we clean it out.
         */
-       mutex_enter(vp->v_interlock);
-       KASSERT((vp->v_iflag & (VI_XLOCK | VI_CLEAN)) == 0);
        vp->v_iflag |= VI_XLOCK;
        if (vp->v_iflag & VI_EXECMAP) {
                atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
@@ -972,23 +972,26 @@
 vrecycle(vnode_t *vp)
 {
 
+       if (vn_lock(vp, LK_EXCLUSIVE) != 0)
+               return false;
+
        mutex_enter(vp->v_interlock);
 
        KASSERT((vp->v_iflag & VI_MARKER) == 0);
 
        if (vp->v_usecount != 1) {
                mutex_exit(vp->v_interlock);
+               VOP_UNLOCK(vp);
                return false;
        }
        if ((vp->v_iflag & VI_CHANGING) != 0)
                vwait(vp, VI_CHANGING);
        if (vp->v_usecount != 1) {
                mutex_exit(vp->v_interlock);
+               VOP_UNLOCK(vp);
                return false;
-       } else if ((vp->v_iflag & VI_CLEAN) != 0) {
-               mutex_exit(vp->v_interlock);
-               return true;
        }
+       KASSERT((vp->v_iflag & VI_CLEAN) == 0);
        vp->v_iflag |= VI_CHANGING;
        vclean(vp);
        vrelel(vp, VRELEL_CHANGING_SET);
@@ -1036,6 +1039,11 @@
 vgone(vnode_t *vp)
 {
 
+       if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
+               KASSERT((vp->v_iflag & VI_CLEAN) != 0);
+               vrele(vp);
+       }
+
        mutex_enter(vp->v_interlock);
        if ((vp->v_iflag & VI_CHANGING) != 0)
                vwait(vp, VI_CHANGING);
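
A note on the first hunk above: the free-list scan now has to take the
vnode lock as well, so every acquisition there is a no-wait attempt, and
any failure backs out whatever is already held before moving on to the
next candidate.  Below is a small userland sketch of that back-off
pattern using pthread_mutex_trylock(); struct candidate, try_pin() and
the "fsgate" mutex standing in for the fstrans start are invented names,
not kernel interfaces.

/*
 * Sketch of the try-lock-and-back-off pattern from the first hunk:
 * attempt each lock without sleeping and, if a later one fails, release
 * everything already held and skip this candidate.
 */
#include <pthread.h>
#include <stdbool.h>

struct candidate {
        pthread_mutex_t lock;           /* akin to the vnode lock */
        pthread_mutex_t interlock;      /* akin to v_interlock */
        pthread_mutex_t fsgate;         /* stand-in for fstrans_start_nowait() */
};

/* Returns true only if all three locks were taken; caller releases them. */
static bool
try_pin(struct candidate *c)
{

        if (pthread_mutex_trylock(&c->lock) != 0)
                return false;                           /* nothing held yet */
        if (pthread_mutex_trylock(&c->interlock) != 0) {
                pthread_mutex_unlock(&c->lock);         /* back out */
                return false;
        }
        if (pthread_mutex_trylock(&c->fsgate) != 0) {
                pthread_mutex_unlock(&c->interlock);    /* back out in reverse */
                pthread_mutex_unlock(&c->lock);
                return false;
        }
        return true;
}

int
main(void)
{
        struct candidate c = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .interlock = PTHREAD_MUTEX_INITIALIZER,
                .fsgate = PTHREAD_MUTEX_INITIALIZER,
        };

        if (try_pin(&c)) {
                /* ... work on the pinned candidate ... */
                pthread_mutex_unlock(&c.fsgate);
                pthread_mutex_unlock(&c.interlock);
                pthread_mutex_unlock(&c.lock);
        }
        return 0;
}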


