Source-Changes-HG archive


[src/ad-namecache]: src/sys vnodes:



details:   https://anonhg.NetBSD.org/src/rev/5ce547fda531
branches:  ad-namecache
changeset: 983010:5ce547fda531
user:      ad <ad%NetBSD.org@localhost>
date:      Fri Jan 24 16:05:22 2020 +0000

description:
vnodes:

- Have own v_usecount again, don't share the uvm_object's refcount.
- Cluster the members of vnode_t and vnode_impl_t in a cache-conscious way
  (see the layout sketch after the diffstat).
- Go back to having vi_lock directly in vnode_impl_t.
- Go back to having v_usecount adjusted with atomics.
- Start adjusting v_holdcnt with atomics, too (see the sketch after this list).
- Put all the namecache stuff back into vnode_impl_t.
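
A minimal sketch of the counting pattern behind the last three items, as a
standalone toy model rather than NetBSD code: the struct and function names
are invented, and C11 atomics plus a pthread mutex stand in for the kernel's
atomic_ops(3) and v_interlock.  The real loops are vtryrele(), vhold() and
holdrele() in the diff below.

/*
 * Toy model of the rule stated in the new vfs_vnode.c comment: count
 * transitions that stay non-zero may use a bare CAS, while any
 * transition to or from zero must hold the object's lock.  All names
 * here are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>

struct toy {
        atomic_uint     t_holdcnt;      /* models v_holdcnt */
        pthread_mutex_t t_interlock;    /* models v_interlock */
};

/* Gain a reference; take the lock only for the 0 -> 1 transition. */
void
toy_hold(struct toy *t)
{
        unsigned int cur = atomic_load(&t->t_holdcnt);

        while (cur != 0) {
                /* Non-zero -> non-zero: lock-free fast path. */
                if (atomic_compare_exchange_weak(&t->t_holdcnt, &cur,
                    cur + 1))
                        return;
                /* CAS failed; cur was reloaded, so just retry. */
        }
        pthread_mutex_lock(&t->t_interlock);
        atomic_fetch_add(&t->t_holdcnt, 1);     /* 0 -> 1 under the lock */
        pthread_mutex_unlock(&t->t_interlock);
}

/* Drop a reference; take the lock only when dropping the last one. */
void
toy_rele(struct toy *t)
{
        unsigned int cur = atomic_load(&t->t_holdcnt);

        while (cur > 1) {
                /* Non-zero -> non-zero: lock-free fast path. */
                if (atomic_compare_exchange_weak(&t->t_holdcnt, &cur,
                    cur - 1))
                        return;
        }
        pthread_mutex_lock(&t->t_interlock);
        if (atomic_fetch_sub(&t->t_holdcnt, 1) == 1) {
                /* Last reference: the real code requeues on an LRU here. */
        }
        pthread_mutex_unlock(&t->t_interlock);
}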

diffstat:

 sys/kern/vfs_subr.c            |    8 +-
 sys/kern/vfs_vnode.c           |  139 ++++++++++++++++++++++++++++++++++------
 sys/miscfs/genfs/genfs_vnops.c |   28 ++++----
 sys/sys/vnode.h                |   55 ++++++---------
 sys/sys/vnode_impl.h           |   58 +++++++++++++---
 5 files changed, 202 insertions(+), 86 deletions(-)
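
The cache-conscious clustering mentioned in the description works roughly as
follows; this is a hypothetical struct for illustration, not the real
vnode_impl_t layout.  Fields that are written together on hot paths are
grouped so they share cache lines, away from read-mostly data, and the
vcache_init() change below passes coherency_unit to pool_cache_init() so
each vnode starts on a cache-line boundary.

/*
 * Hypothetical illustration only -- not the real vnode_impl_t.
 * NetBSD-specific: COHERENCY_UNIT and __aligned() come from
 * <sys/param.h> / <sys/cdefs.h>.
 */
#include <sys/param.h>

struct example_node {
        /* hot, frequently written fields share the first cache line */
        unsigned int    en_usecount;
        unsigned int    en_holdcnt;
        unsigned int    en_numoutput;

        /* read-mostly after initialization */
        void            *en_data;
        int             en_type;
} __aligned(COHERENCY_UNIT);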

diffs (truncated from 589 to 300 lines):

diff -r e56ac5280475 -r 5ce547fda531 sys/kern/vfs_subr.c
--- a/sys/kern/vfs_subr.c       Thu Jan 23 21:24:54 2020 +0000
+++ b/sys/kern/vfs_subr.c       Fri Jan 24 16:05:22 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vfs_subr.c,v 1.478.2.1 2020/01/17 21:47:35 ad Exp $    */
+/*     $NetBSD: vfs_subr.c,v 1.478.2.2 2020/01/24 16:05:22 ad Exp $    */
 
 /*-
  * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008, 2019
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.478.2.1 2020/01/17 21:47:35 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.478.2.2 2020/01/24 16:05:22 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -1111,7 +1111,7 @@
            vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
        (*pr)("%ssize %" PRIx64 " writesize %" PRIx64 " numoutput %d\n",
            prefix, vp->v_size, vp->v_writesize, vp->v_numoutput);
-       (*pr)("%sdata %p lock %p\n", prefix, vp->v_data, vip->vi_lock);
+       (*pr)("%sdata %p lock %p\n", prefix, vp->v_data, &vip->vi_lock);
 
        (*pr)("%sstate %s key(%p %zd)", prefix, vstate_name(vip->vi_state),
            vip->vi_key.vk_mount, vip->vi_key.vk_key_len);
@@ -1544,7 +1544,7 @@
 
        for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
                TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
-                       if (vip->vi_lock == vlock ||
+                       if (&vip->vi_lock == vlock ||
                            VIMPL_TO_VNODE(vip)->v_interlock == vlock)
                                vfs_vnode_print(VIMPL_TO_VNODE(vip), full, pr);
                }
diff -r e56ac5280475 -r 5ce547fda531 sys/kern/vfs_vnode.c
--- a/sys/kern/vfs_vnode.c      Thu Jan 23 21:24:54 2020 +0000
+++ b/sys/kern/vfs_vnode.c      Fri Jan 24 16:05:22 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vfs_vnode.c,v 1.105.2.4 2020/01/23 19:28:39 ad Exp $   */
+/*     $NetBSD: vfs_vnode.c,v 1.105.2.5 2020/01/24 16:05:22 ad Exp $   */
 
 /*-
  * Copyright (c) 1997-2011, 2019 The NetBSD Foundation, Inc.
@@ -142,10 +142,19 @@
  *     as vput(9), routines.  Common points holding references are e.g.
  *     file openings, current working directory, mount points, etc.  
  *
+ * Note on v_usecount & v_holdcnt and their locking
+ *
+ *     At nearly all points where the counts could be zero, the
+ *     vnode_t::v_interlock will be held.  To change the counts away
+ *     from zero, the interlock must be held.  To change from a non-zero
+ *     value to zero, again the interlock must be held.
+ *
+ *     Changing the usecount from a non-zero value to a non-zero value can
+ *     safely be done using atomic operations, without the interlock held.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105.2.4 2020/01/23 19:28:39 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105.2.5 2020/01/24 16:05:22 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -669,6 +678,27 @@
 }
 
 /*
+ * Try to drop a reference on a vnode.  Abort if we are releasing the
+ * last reference.  Note: this _must_ succeed if not the last reference.
+ */
+static bool
+vtryrele(vnode_t *vp)
+{
+       u_int use, next;
+
+       for (use = vp->v_usecount;; use = next) {
+               if (__predict_false(use == 1)) {
+                       return false;
+               }
+               KASSERT(use > 1);
+               next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
+               if (__predict_true(next == use)) {
+                       return true;
+               }
+       }
+}
+
+/*
  * vput: unlock and release the reference.
  */
 void
@@ -676,7 +706,20 @@
 {
        int lktype;
 
-       if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
+       /*
+        * Do an unlocked check of v_usecount.  If it looks like we're not
+        * about to drop the last reference, then unlock the vnode and try
+        * to drop the reference.  If it ends up being the last reference
+        * after all, we dropped the lock when we shouldn't have.  vrelel()
+        * can fix it all up.  Most of the time this will all go to plan.
+        */
+       if (vp->v_usecount > 1) {
+               VOP_UNLOCK(vp);
+               if (vtryrele(vp)) {
+                       return;
+               }
+               lktype = LK_NONE;
+       } else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
                lktype = LK_EXCLUSIVE;
        } else {
                lktype = VOP_ISLOCKED(vp);
@@ -708,11 +751,10 @@
         * If not the last reference, just drop the reference count
         * and unlock.
         */
-       if (vp->v_usecount > 1) {
+       if (vtryrele(vp)) {
                if (lktype != LK_NONE) {
                        VOP_UNLOCK(vp);
                }
-               vp->v_usecount--;
                mutex_exit(vp->v_interlock);
                return;
        }
@@ -792,8 +834,7 @@
                mutex_enter(vp->v_interlock);
                if (!recycle) {
                        VOP_UNLOCK(vp);
-                       if (vp->v_usecount > 1) {
-                               vp->v_usecount--;
+                       if (vtryrele(vp)) {
                                mutex_exit(vp->v_interlock);
                                return;
                        }
@@ -820,8 +861,7 @@
                KASSERT(vp->v_usecount > 0);
        }
 
-       vp->v_usecount--;
-       if (vp->v_usecount != 0) {
+       if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
                /* Gained another reference while being reclaimed. */
                mutex_exit(vp->v_interlock);
                return;
@@ -848,6 +888,9 @@
 vrele(vnode_t *vp)
 {
 
+       if (vtryrele(vp)) {
+               return;
+       }
        mutex_enter(vp->v_interlock);
        vrelel(vp, 0, LK_NONE);
 }
@@ -859,6 +902,9 @@
 vrele_async(vnode_t *vp)
 {
 
+       if (vtryrele(vp)) {
+               return;
+       }
        mutex_enter(vp->v_interlock);
        vrelel(vp, VRELEL_ASYNC, LK_NONE);
 }
@@ -873,9 +919,7 @@
 
        KASSERT(vp->v_usecount != 0);
 
-       mutex_enter(vp->v_interlock);
-       vp->v_usecount++;
-       mutex_exit(vp->v_interlock);
+       atomic_inc_uint(&vp->v_usecount);
 }
 
 /*
@@ -888,11 +932,34 @@
 
        KASSERT(mutex_owned(vp->v_interlock));
 
-       if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
+       if (atomic_inc_uint_nv(&vp->v_holdcnt) == 1 && vp->v_usecount == 0)
                lru_requeue(vp, lru_which(vp));
 }
 
 /*
+ * Page or buffer structure gets a reference.
+ */
+void
+vhold(vnode_t *vp)
+{
+       int hold, next;
+
+       for (hold = vp->v_holdcnt;; hold = next) {
+               if (__predict_false(hold == 0)) {
+                       break;
+               }
+               next = atomic_cas_uint(&vp->v_holdcnt, hold, hold + 1);
+               if (__predict_true(next == hold)) {
+                       return;
+               }
+       }
+
+       mutex_enter(vp->v_interlock);
+       vholdl(vp);
+       mutex_exit(vp->v_interlock);
+}
+
+/*
  * Page or buffer structure frees a reference.
  * Called with v_interlock held.
  */
@@ -906,12 +973,35 @@
                vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
        }
 
-       vp->v_holdcnt--;
-       if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
+       if (atomic_dec_uint_nv(&vp->v_holdcnt) == 0 && vp->v_usecount == 0)
                lru_requeue(vp, lru_which(vp));
 }
 
 /*
+ * Page or buffer structure frees a reference.
+ */
+void
+holdrele(vnode_t *vp)
+{
+       int hold, next;
+
+       for (hold = vp->v_holdcnt;; hold = next) {
+               if (__predict_false(hold == 1)) {
+                       break;
+               }
+               KASSERT(hold > 1);
+               next = atomic_cas_uint(&vp->v_holdcnt, hold, hold - 1);
+               if (__predict_true(next == hold)) {
+                       return;
+               }
+       }
+
+       mutex_enter(vp->v_interlock);
+       holdrelel(vp);
+       mutex_exit(vp->v_interlock);
+}
+
+/*
  * Recycle an unused vnode if caller holds the last reference.
  */
 bool
@@ -1013,7 +1103,7 @@
        if (VSTATE_GET(vp) == VS_RECLAIMED) {
                mutex_exit(vp->v_interlock);
        } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
-               vp->v_usecount++;
+               atomic_inc_uint(&vp->v_usecount);
                mutex_exit(vp->v_interlock);
                vgone(vp);
        } else {
@@ -1068,8 +1158,8 @@
 vcache_init(void)
 {
 
-       vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
-           "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
+       vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
+           0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
        KASSERT(vcache_pool != NULL);
        mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&vcache_cv, "vcache");
@@ -1139,7 +1229,7 @@
        vip = pool_cache_get(vcache_pool, PR_WAITOK);
        memset(vip, 0, sizeof(*vip));
 
-       vip->vi_lock = rw_obj_alloc();
+       rw_init(&vip->vi_lock);
 
        vp = VIMPL_TO_VNODE(vip);
        uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
@@ -1201,7 +1291,7 @@
        if (vp->v_type == VBLK || vp->v_type == VCHR)
                spec_node_destroy(vp);
 
-       rw_obj_free(vip->vi_lock);
+       rw_destroy(&vip->vi_lock);
        uvm_obj_destroy(&vp->v_uobj, true);
        cv_destroy(&vp->v_cv);
        cache_vnode_fini(vp);
@@ -1226,8 +1316,10 @@
                error = ENOENT;
        else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
                error = EBUSY;
+       else if (vp->v_usecount == 0)
+               vp->v_usecount = 1;
        else
-               vp->v_usecount++;


