Source-Changes-HG archive


[src/ad-namecache]: src/sys/sys Keeping the namecache lock next to the vnode ...



details:   https://anonhg.NetBSD.org/src/rev/539e4a27d266
branches:  ad-namecache
changeset: 1025067:539e4a27d266
user:      ad <ad%NetBSD.org@localhost>
date:      Sun Mar 22 14:23:27 2020 +0000

description:
Keeping the namecache lock next to the vnode lock has a repeatable
detrimental effect.  So shuffle some stuff around in vnode_impl to save
a bit of space, and give the namecache locks their own cache line.
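
As an illustration of the idea (a minimal sketch, not the NetBSD vnode_impl
itself): aligning a heavily written lock member on its own cache line keeps
writers of that lock from invalidating the line that holds the neighbouring
read-mostly fields.  CACHE_LINE_SIZE below is an assumed stand-in for the
kernel's COHERENCY_UNIT, and pthread_rwlock_t stands in for krwlock_t:

    /* sketch.c -- assumes 64-byte cache lines; compile with -std=c11 */
    #include <stdalign.h>
    #include <pthread.h>

    #define CACHE_LINE_SIZE 64              /* stand-in for COHERENCY_UNIT */

    struct node {
            /* read-mostly identity data shares one cache line */
            unsigned long   id;
            void            *data;

            /* hot lock gets its own cache line to avoid false sharing */
            alignas(CACHE_LINE_SIZE) pthread_rwlock_t lock;

            /*
             * A second hot lock follows on the same aligned region,
             * much as vi_nc_listlock follows vi_nc_lock below.
             */
            pthread_rwlock_t listlock;
    };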

diffstat:

 sys/sys/vnode_impl.h |  32 ++++++++++++++++----------------
 1 files changed, 16 insertions(+), 16 deletions(-)

diffs (59 lines):

diff -r 3322c888dc87 -r 539e4a27d266 sys/sys/vnode_impl.h
--- a/sys/sys/vnode_impl.h      Sun Mar 22 14:16:50 2020 +0000
+++ b/sys/sys/vnode_impl.h      Sun Mar 22 14:23:27 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vnode_impl.h,v 1.19.2.5 2020/01/24 16:05:23 ad Exp $   */
+/*     $NetBSD: vnode_impl.h,v 1.19.2.6 2020/03/22 14:23:27 ad Exp $   */
 
 /*-
  * Copyright (c) 2016, 2019, 2020 The NetBSD Foundation, Inc.
@@ -76,6 +76,19 @@
        struct vcache_key vi_key;               /* c   vnode cache key */
 
        /*
+        * vnode cache, LRU and syncer.  This all changes with some
+        * regularity so keep it together.
+        */
+       struct vnodelst *vi_lrulisthd;          /* d   current lru list head */
+       TAILQ_ENTRY(vnode_impl) vi_lrulist;     /* d   lru list */
+       int             vi_synclist_slot;       /* s   synclist slot index */
+       int             vi_lrulisttm;           /* i   time of lru enqueue */
+       TAILQ_ENTRY(vnode_impl) vi_synclist;    /* s   vnodes with dirty bufs */
+       SLIST_ENTRY(vnode_impl) vi_hash;        /* c   vnode cache list */
+       enum vnode_state vi_state;              /* i   current state */
+       TAILQ_ENTRY(vnode_impl) vi_mntvnodes;   /* m   vnodes for mount point */
+
+       /*
         * Namecache.  Give it a separate line so activity doesn't impinge
         * on the stable stuff.
         */
@@ -88,27 +101,14 @@
        uint32_t        vi_nc_spare;            /* -   spare (padding) */
 
        /*
-        * vnode cache, LRU and syncer.  This all changes with some
-        * regularity so keep it together.
-        */
-       struct vnodelst *vi_lrulisthd           /* d   current lru list head */
-           __aligned(COHERENCY_UNIT);
-       TAILQ_ENTRY(vnode_impl) vi_lrulist;     /* d   lru list */
-       int             vi_synclist_slot;       /* s   synclist slot index */
-       int             vi_lrulisttm;           /* i   time of lru enqueue */
-       TAILQ_ENTRY(vnode_impl) vi_synclist;    /* s   vnodes with dirty bufs */
-       SLIST_ENTRY(vnode_impl) vi_hash;        /* c   vnode cache list */
-       enum vnode_state vi_state;              /* i   current state */
-
-       /*
         * Locks and expensive to access items which can be expected to
         * generate a cache miss.
         */
        krwlock_t       vi_lock                 /* -   lock for this vnode */
            __aligned(COHERENCY_UNIT);
-       krwlock_t       vi_nc_lock;             /* -   lock on node */
+       krwlock_t       vi_nc_lock              /* -   lock on node */
+           __aligned(COHERENCY_UNIT);
        krwlock_t       vi_nc_listlock;         /* -   lock on nn_list */
-       TAILQ_ENTRY(vnode_impl) vi_mntvnodes;   /* m   vnodes for mount point */
 };
 typedef struct vnode_impl vnode_impl_t;
 
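For completeness, a hypothetical compile-time check (not part of this change)
can confirm that an aligned member really does begin on a fresh cache line;
offsetof plus static_assert is enough.  Again, the 64-byte line size is an
assumption standing in for COHERENCY_UNIT:

    /* check.c -- hypothetical alignment check, -std=c11 */
    #include <assert.h>
    #include <stdalign.h>
    #include <stddef.h>

    #define CACHE_LINE_SIZE 64

    struct demo {
            int     flags;
            alignas(CACHE_LINE_SIZE) int lock_word;
    };

    static_assert(offsetof(struct demo, lock_word) % CACHE_LINE_SIZE == 0,
        "lock_word must begin on its own cache line");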


