Source-Changes-HG archive


[src/trunk]: src/sys/kern Change the freelists to lrulists, all vnodes are al...



details:   https://anonhg.NetBSD.org/src/rev/0fd0bf973980
branches:  trunk
changeset: 349560:0fd0bf973980
user:      hannken <hannken@NetBSD.org>
date:      Wed Dec 14 15:49:35 2016 +0000

description:
Change the freelists to lrulists; all vnodes are now always on one
of the lists.  This speeds up namei on cached vnodes by ~3 percent.

Merge "vrele_thread" into "vdrain_thread" so we have one thread
working on the lrulists.  Adapt vfs_drainvnodes() to always wait
for a complete cycle of vdrain_thread().
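
Below is a minimal, self-contained userspace sketch of the scheme the
description outlines: three TAILQ lru lists and a requeue helper, built
only on <sys/queue.h>.  The names (struct vnode_model, hold_count, and
so on) are illustrative assumptions, not the kernel's own types; see
the diff itself for the real lru_which()/lru_requeue().

#include <sys/queue.h>
#include <stdio.h>

TAILQ_HEAD(lrulist_head, vnode_model);

struct vnode_model {
	int	use_count;			/* active references */
	int	hold_count;			/* buffer/page references */
	TAILQ_ENTRY(vnode_model) lrulist;	/* lru list linkage */
	struct lrulist_head *lrulisthd;		/* list we are on, if any */
};

/*
 * The vrele list is targeted explicitly for asynchronous release;
 * lru_which() only ever picks between hold and free, as in the change.
 */
static struct lrulist_head lru_vrele = TAILQ_HEAD_INITIALIZER(lru_vrele);
static struct lrulist_head lru_free  = TAILQ_HEAD_INITIALIZER(lru_free);
static struct lrulist_head lru_hold  = TAILQ_HEAD_INITIALIZER(lru_hold);

static struct lrulist_head *
lru_which(struct vnode_model *vp)
{

	return vp->hold_count > 0 ? &lru_hold : &lru_free;
}

/*
 * Move a vnode to the tail of the given list.  The kernel version
 * additionally holds vdrain_lock, maintains numvnodes when a vnode
 * enters from or leaves to a NULL list (alloc/free), and broadcasts
 * vdrain_cv when there is work for the vdrain thread.
 */
static void
lru_requeue(struct vnode_model *vp, struct lrulist_head *listhd)
{

	if (vp->lrulisthd != NULL)
		TAILQ_REMOVE(vp->lrulisthd, vp, lrulist);
	vp->lrulisthd = listhd;
	if (vp->lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vp->lrulisthd, vp, lrulist);
}

int
main(void)
{
	struct vnode_model vn = { .hold_count = 1 };

	lru_requeue(&vn, lru_which(&vn));	/* lands on the hold list */
	vn.hold_count = 0;
	lru_requeue(&vn, lru_which(&vn));	/* migrates to the free list */
	printf("on free list: %s\n",
	    vn.lrulisthd == &lru_free ? "yes" : "no");
	return 0;
}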

diffstat:

 sys/kern/vfs_vnode.c |  395 +++++++++++++++++++++++---------------------------
 1 files changed, 183 insertions(+), 212 deletions(-)

diffs (truncated from 592 to 300 lines):

diff -r 61403ca387d5 -r 0fd0bf973980 sys/kern/vfs_vnode.c
--- a/sys/kern/vfs_vnode.c      Wed Dec 14 15:48:54 2016 +0000
+++ b/sys/kern/vfs_vnode.c      Wed Dec 14 15:49:35 2016 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vfs_vnode.c,v 1.62 2016/12/14 15:48:55 hannken Exp $   */
+/*     $NetBSD: vfs_vnode.c,v 1.63 2016/12/14 15:49:35 hannken Exp $   */
 
 /*-
  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
@@ -156,7 +156,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.62 2016/12/14 15:48:55 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.63 2016/12/14 15:49:35 hannken Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -188,22 +188,19 @@
 u_int                  numvnodes               __cacheline_aligned;
 
 /*
- * There are two free lists: one is for vnodes which have no buffer/page
- * references and one for those which do (i.e. v_holdcnt is non-zero).
- * Vnode recycling mechanism first attempts to look into the former list.
+ * There are three lru lists: one holds vnodes waiting for async release,
+ * one is for vnodes which have no buffer/page references and
+ * one for those which do (i.e. v_holdcnt is non-zero).
  */
-static kmutex_t                vnode_free_list_lock    __cacheline_aligned;
-static vnodelst_t      vnode_free_list         __cacheline_aligned;
-static vnodelst_t      vnode_hold_list         __cacheline_aligned;
+static vnodelst_t      lru_vrele_list          __cacheline_aligned;
+static vnodelst_t      lru_free_list           __cacheline_aligned;
+static vnodelst_t      lru_hold_list           __cacheline_aligned;
+static kmutex_t                vdrain_lock             __cacheline_aligned;
 static kcondvar_t      vdrain_cv               __cacheline_aligned;
-
-static vnodelst_t      vrele_list              __cacheline_aligned;
-static kmutex_t                vrele_lock              __cacheline_aligned;
-static kcondvar_t      vrele_cv                __cacheline_aligned;
-static lwp_t *         vrele_lwp               __cacheline_aligned;
-static int             vrele_pending           __cacheline_aligned;
-static int             vrele_gen               __cacheline_aligned;
-
+static int             vdrain_gen;
+static kcondvar_t      vdrain_gen_cv;
+static bool            vdrain_retry;
+static lwp_t *         vdrain_lwp;
 SLIST_HEAD(hashhead, vnode_impl);
 static struct {
        kmutex_t        lock;
@@ -214,15 +211,15 @@
        pool_cache_t    pool;
 }                      vcache                  __cacheline_aligned;
 
-static int             cleanvnode(void);
-static vnode_impl_t *vcache_alloc(void);
+static void            lru_requeue(vnode_t *, vnodelst_t *);
+static vnodelst_t *    lru_which(vnode_t *);
+static vnode_impl_t *  vcache_alloc(void);
 static void            vcache_free(vnode_impl_t *);
 static void            vcache_init(void);
 static void            vcache_reinit(void);
 static void            vcache_reclaim(vnode_t *);
 static void            vrelel(vnode_t *, int);
 static void            vdrain_thread(void *);
-static void            vrele_thread(void *);
 static void            vnpanic(vnode_t *, const char *, ...)
     __printflike(2, 3);
 
@@ -357,22 +354,18 @@
        KASSERT(dead_rootmount != NULL);
        dead_rootmount->mnt_iflag = IMNT_MPSAFE;
 
-       mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
-       TAILQ_INIT(&vnode_free_list);
-       TAILQ_INIT(&vnode_hold_list);
-       TAILQ_INIT(&vrele_list);
+       mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
+       TAILQ_INIT(&lru_free_list);
+       TAILQ_INIT(&lru_hold_list);
+       TAILQ_INIT(&lru_vrele_list);
 
        vcache_init();
 
-       mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&vdrain_cv, "vdrain");
-       cv_init(&vrele_cv, "vrele");
+       cv_init(&vdrain_gen_cv, "vdrainwt");
        error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
-           NULL, NULL, "vdrain");
+           NULL, &vdrain_lwp, "vdrain");
        KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
-       error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
-           NULL, &vrele_lwp, "vrele");
-       KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
 }
 
 /*
@@ -420,53 +413,74 @@
 }
 
 /*
- * cleanvnode: grab a vnode from freelist, clean and free it.
- *
- * => Releases vnode_free_list_lock.
+ * Return the lru list this node should be on.
+ */
+static vnodelst_t *
+lru_which(vnode_t *vp)
+{
+
+       KASSERT(mutex_owned(vp->v_interlock));
+
+       if (vp->v_holdcnt > 0)
+               return &lru_hold_list;
+       else
+               return &lru_free_list;
+}
+
+/*
+ * Put vnode to end of given list.
+ * Both the current and the new list may be NULL, used on vnode alloc/free.
+ * Adjust numvnodes and signal vdrain thread if there is work.
  */
-static int
-cleanvnode(void)
+static void
+lru_requeue(vnode_t *vp, vnodelst_t *listhd)
 {
-       vnode_t *vp;
-       vnode_impl_t *vi;
-       vnodelst_t *listhd;
+       vnode_impl_t *node;
+
+       mutex_enter(&vdrain_lock);
+       node = VNODE_TO_VIMPL(vp);
+       if (node->vi_lrulisthd != NULL)
+               TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
+       else
+               numvnodes++;
+       node->vi_lrulisthd = listhd;
+       if (node->vi_lrulisthd != NULL)
+               TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
+       else
+               numvnodes--;
+       if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
+               cv_broadcast(&vdrain_cv);
+       mutex_exit(&vdrain_lock);
+}
+
+/*
+ * Reclaim a cached vnode.  Used from vdrain_thread only.
+ */
+static __inline void
+vdrain_remove(vnode_t *vp)
+{
        struct mount *mp;
 
-       KASSERT(mutex_owned(&vnode_free_list_lock));
-
-       listhd = &vnode_free_list;
-try_nextlist:
-       TAILQ_FOREACH(vi, listhd, vi_lrulist) {
-               vp = VIMPL_TO_VNODE(vi);
-               /*
-                * It's safe to test v_usecount and v_iflag
-                * without holding the interlock here, since
-                * these vnodes should never appear on the
-                * lists.
-                */
-               KASSERT(vp->v_usecount == 0);
-               KASSERT(vi->vi_lrulisthd == listhd);
+       KASSERT(mutex_owned(&vdrain_lock));
 
-               if (!mutex_tryenter(vp->v_interlock))
-                       continue;
-               mp = vp->v_mount;
-               if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
-                       mutex_exit(vp->v_interlock);
-                       continue;
-               }
-               break;
+       /* Probe usecount (unlocked). */
+       if (vp->v_usecount > 0)
+               return;
+       /* Try v_interlock -- we lock the wrong direction! */
+       if (!mutex_tryenter(vp->v_interlock))
+               return;
+       /* Probe usecount and state. */
+       if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) {
+               mutex_exit(vp->v_interlock);
+               return;
        }
-
-       if (vi == NULL) {
-               if (listhd == &vnode_free_list) {
-                       listhd = &vnode_hold_list;
-                       goto try_nextlist;
-               }
-               mutex_exit(&vnode_free_list_lock);
-               return EBUSY;
+       mp = vp->v_mount;
+       if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
+               mutex_exit(vp->v_interlock);
+               return;
        }
-
-       mutex_exit(&vnode_free_list_lock);
+       vdrain_retry = true;
+       mutex_exit(&vdrain_lock);
 
        if (vget(vp, 0, true /* wait */) == 0) {
                if (!vrecycle(vp))
@@ -474,59 +488,97 @@
        }
        fstrans_done(mp);
 
-       return 0;
+       mutex_enter(&vdrain_lock);
 }
 
 /*
- * Helper thread to keep the number of vnodes below desiredvnodes.
+ * Release a cached vnode.  Used from vdrain_thread only.
+ */
+static __inline void
+vdrain_vrele(vnode_t *vp)
+{
+       vnode_impl_t *node = VNODE_TO_VIMPL(vp);
+       struct mount *mp;
+
+       KASSERT(mutex_owned(&vdrain_lock));
+
+       /*
+        * Safe to take v_interlock -- no other thread will
+        * lock v_interlock -> vdrain_lock as usecount > 0.
+        */
+       mutex_enter(vp->v_interlock);
+       mp = vp->v_mount;
+       if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
+               mutex_exit(vp->v_interlock);
+               return;
+       }
+
+       /* First put the vnode back onto its lru list. */
+       KASSERT(node->vi_lrulisthd == &lru_vrele_list);
+       TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
+       node->vi_lrulisthd = lru_which(vp);
+       TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
+
+       vdrain_retry = true;
+       mutex_exit(&vdrain_lock);
+
+       vrelel(vp, 0);
+       fstrans_done(mp);
+
+       mutex_enter(&vdrain_lock);
+}
+
+/*
+ * Helper thread to keep the number of vnodes below desiredvnodes
+ * and release vnodes from asynchronous vrele.
  */
 static void
 vdrain_thread(void *cookie)
 {
-       int error;
+       vnodelst_t *listhd[] = {
+           &lru_vrele_list, &lru_free_list, &lru_hold_list
+       };
+       int i;
+       u_int target;
+       vnode_impl_t *node, *marker;
 
-       mutex_enter(&vnode_free_list_lock);
+       marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
+
+       mutex_enter(&vdrain_lock);
 
        for (;;) {
-               cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
-               while (numvnodes > desiredvnodes) {
-                       error = cleanvnode();
-                       if (error)
-                               kpause("vndsbusy", false, hz, NULL);
-                       mutex_enter(&vnode_free_list_lock);
-                       if (error)
-                               break;
+               vdrain_retry = false;
+               target = desiredvnodes - desiredvnodes/10;
+
+               for (i = 0; i < __arraycount(listhd); i++) {
+                       TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
+                       while ((node = TAILQ_NEXT(marker, vi_lrulist))) {
+                               TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
+                               TAILQ_INSERT_AFTER(listhd[i], node, marker,
+                                   vi_lrulist);
+                               if (listhd[i] == &lru_vrele_list)



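The tail of the diff introduces a marker-based traversal: vdrain_thread()
inserts a marker node at the head of each lru list and repeatedly moves
it past the next element, so the iteration position survives dropping
vdrain_lock while a vnode is processed (vdrain_remove() and vdrain_vrele()
above both release and retake that lock).  A hedged, self-contained
userspace sketch of the pattern follows; the names are illustrative
assumptions, not the kernel's own.

#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	bool			is_marker;	/* placeholder, no payload */
	int			value;
	TAILQ_ENTRY(node)	entries;
};
TAILQ_HEAD(nodelist, node);

static void
walk_with_marker(struct nodelist *list, struct node *marker)
{
	struct node *n;

	TAILQ_INSERT_HEAD(list, marker, entries);
	while ((n = TAILQ_NEXT(marker, entries)) != NULL) {
		/* Move the marker past the node we are about to visit. */
		TAILQ_REMOVE(list, marker, entries);
		TAILQ_INSERT_AFTER(list, n, marker, entries);
		if (n->is_marker)
			continue;	/* skip other iterators' markers */
		/*
		 * Here the kernel drops vdrain_lock, works on the vnode
		 * (vrecycle() or vrelel()) and retakes the lock; the
		 * marker keeps its place however the list changes in
		 * the meantime.
		 */
		printf("visiting %d\n", n->value);
	}
	TAILQ_REMOVE(list, marker, entries);
}

int
main(void)
{
	struct nodelist list = TAILQ_HEAD_INITIALIZER(list);
	struct node marker = { .is_marker = true };
	struct node a = { .value = 1 }, b = { .value = 2 };

	TAILQ_INSERT_TAIL(&list, &a, entries);
	TAILQ_INSERT_TAIL(&list, &b, entries);
	walk_with_marker(&list, &marker);
	return 0;
}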