Source-Changes-HG archive


[src/trunk]: src/sys In uvmpd_trylockowner(), if the initial try-lock of the ...



details:   https://anonhg.NetBSD.org/src/rev/4f214450991e
branches:  trunk
changeset: 942017:4f214450991e
user:      chs <chs%NetBSD.org@localhost>
date:      Wed Nov 04 01:30:19 2020 +0000

description:
In uvmpd_trylockowner(), if the initial try-lock of the owner lock fails,
then rather than doing more try-locks and eventually sleeping for a tick,
take a hold on the current owner's lock, drop the page interlock,
and acquire the lock that we took the hold on in a blocking fashion.
After we get the lock, check whether the lock we acquired is still
the lock for the owner of the page that we're interested in.
If the owner hasn't changed then we can proceed with this page;
otherwise we skip this page and move on to a different one.
This dramatically reduces the amount of time that the pagedaemon
sleeps trying to get locks: even 1 tick is an eternity to sleep
in this context, and that case was easy to trigger in practice.
With this new method the pagedaemon only very rarely blocks
to acquire the lock that it wants, since the object locks are adaptive,
and when the pagedaemon does block, the time it spends sleeping
will generally be much less than 1 tick.
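
For readers skimming the interleaved diff below, the new flow of
uvmpd_trylockowner() is easier to follow consolidated in one piece.
The following is a sketch reconstructed from the diff, not the
committed code verbatim: the DEBUG dead-owner check and the PG_ANON
loan bookkeeping are elided, and the goto is restructured.

	static krwlock_t *
	trylockowner_sketch(struct vm_page *pg)
	{
		krwlock_t *slock, *heldslock;

		KASSERT(mutex_owned(&pg->interlock));

		/* Find the owner's lock: uobj, anon, or NULL if in flux. */
		slock = uvmpd_page_owner_lock(pg);
		if (slock == NULL) {
			mutex_exit(&pg->interlock);
			return NULL;
		}

		/* Fast path: the try-lock succeeds and we are done. */
		if (rw_tryenter(slock, RW_WRITER)) {
			mutex_exit(&pg->interlock);
			return slock;
		}

		/*
		 * Slow path: take a hold so the lock cannot be freed
		 * once we drop the interlock, then block to acquire it.
		 */
		rw_obj_hold(slock);
		mutex_exit(&pg->interlock);
		rw_enter(slock, RW_WRITER);
		heldslock = slock;

		/* The owner may have changed while we slept: recheck. */
		mutex_enter(&pg->interlock);
		slock = uvmpd_page_owner_lock(pg);
		if (slock != heldslock) {
			rw_exit(heldslock);	/* wrong lock, give it back */
			slock = NULL;		/* caller skips this page */
		}
		rw_obj_free(heldslock);		/* drop our hold */
		mutex_exit(&pg->interlock);
		return slock;
	}

Note that the diff also moves rw_obj_init() out of init_main.c and into
uvm_init(), because kernel_object's second-pass setup in uao_create()
now allocates its lock with rw_obj_alloc(); until that point
kernel_object uses a temporary static lock.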

diffstat:

 sys/kern/init_main.c  |    5 +-
 sys/uvm/uvm_aobj.c    |   35 ++++++++------
 sys/uvm/uvm_init.c    |    7 ++-
 sys/uvm/uvm_pdaemon.c |  120 +++++++++++++++++++++++++++----------------------
 4 files changed, 93 insertions(+), 74 deletions(-)

diffs (truncated from 306 to 300 lines):

diff -r 33f36ecff210 -r 4f214450991e sys/kern/init_main.c
--- a/sys/kern/init_main.c      Tue Nov 03 22:21:43 2020 +0000
+++ b/sys/kern/init_main.c      Wed Nov 04 01:30:19 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: init_main.c,v 1.531 2020/09/08 16:00:35 riastradh Exp $        */
+/*     $NetBSD: init_main.c,v 1.532 2020/11/04 01:30:19 chs Exp $      */
 
 /*-
  * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.531 2020/09/08 16:00:35 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.532 2020/11/04 01:30:19 chs Exp $");
 
 #include "opt_cnmagic.h"
 #include "opt_ddb.h"
@@ -329,7 +329,6 @@
 
        /* Initialize lock caches. */
        mutex_obj_init();
-       rw_obj_init();
 
        /* Initialize radix trees (used by numerous subsystems). */
        radix_tree_init();
diff -r 33f36ecff210 -r 4f214450991e sys/uvm/uvm_aobj.c
--- a/sys/uvm/uvm_aobj.c        Tue Nov 03 22:21:43 2020 +0000
+++ b/sys/uvm/uvm_aobj.c        Wed Nov 04 01:30:19 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_aobj.c,v 1.151 2020/08/19 15:36:41 chs Exp $       */
+/*     $NetBSD: uvm_aobj.c,v 1.152 2020/11/04 01:30:19 chs Exp $       */
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.151 2020/08/19 15:36:41 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.152 2020/11/04 01:30:19 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -416,7 +416,7 @@
 uao_create(voff_t size, int flags)
 {
        static struct uvm_aobj kernel_object_store;
-       static krwlock_t kernel_object_lock __cacheline_aligned;
+       static krwlock_t bootstrap_kernel_object_lock;
        static int kobj_alloced __diagused = 0;
        pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
        struct uvm_aobj *aobj;
@@ -458,25 +458,30 @@
         * we are still booting we should be the only thread around.
         */
 
-       if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
+       const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;
+       if (flags == 0 || kernswap) {
 #if defined(VMSWAP)
-               const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;
 
                /* allocate hash table or array depending on object size */
                if (UAO_USES_SWHASH(aobj)) {
                        aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
-                           HASH_LIST, kernswap ? false : true,
-                           &aobj->u_swhashmask);
-                       if (aobj->u_swhash == NULL)
-                               panic("uao_create: hashinit swhash failed");
+                           HASH_LIST, true, &aobj->u_swhashmask);
                } else {
                        aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
-                           kernswap ? KM_NOSLEEP : KM_SLEEP);
-                       if (aobj->u_swslots == NULL)
-                               panic("uao_create: swslots allocation failed");
+                           KM_SLEEP);
                }
 #endif /* defined(VMSWAP) */
 
+               /*
+                * Replace kernel_object's temporary static lock with
+                * a regular rw_obj.  We cannot use uvm_obj_setlock()
+                * because that would try to free the old lock.
+                */
+
+               if (kernswap) {
+                       aobj->u_obj.vmobjlock = rw_obj_alloc();
+                       rw_destroy(&bootstrap_kernel_object_lock);
+               }
                if (flags) {
                        aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
                        return &aobj->u_obj;
@@ -490,9 +495,9 @@
        const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
        uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
        if (__predict_false(kernobj)) {
-               /* Initialisation only once, for UAO_FLAG_KERNOBJ. */
-               rw_init(&kernel_object_lock);
-               uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
+               /* Use a temporary static lock for kernel_object. */
+               rw_init(&bootstrap_kernel_object_lock);
+               uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
        }
 
        /*
diff -r 33f36ecff210 -r 4f214450991e sys/uvm/uvm_init.c
--- a/sys/uvm/uvm_init.c        Tue Nov 03 22:21:43 2020 +0000
+++ b/sys/uvm/uvm_init.c        Wed Nov 04 01:30:19 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_init.c,v 1.54 2020/10/07 17:51:50 chs Exp $        */
+/*     $NetBSD: uvm_init.c,v 1.55 2020/11/04 01:30:19 chs Exp $        */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.54 2020/10/07 17:51:50 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.55 2020/11/04 01:30:19 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -166,8 +166,11 @@
 
        /*
         * Enable paging of kernel objects.
+        * This second pass of initializing kernel_object requires rw_obj,
+        * so initialize that first.
         */
 
+       rw_obj_init();
        uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
            UAO_FLAG_KERNSWAP);
 
diff -r 33f36ecff210 -r 4f214450991e sys/uvm/uvm_pdaemon.c
--- a/sys/uvm/uvm_pdaemon.c     Tue Nov 03 22:21:43 2020 +0000
+++ b/sys/uvm/uvm_pdaemon.c     Wed Nov 04 01:30:19 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_pdaemon.c,v 1.130 2020/07/09 05:57:15 skrll Exp $  */
+/*     $NetBSD: uvm_pdaemon.c,v 1.131 2020/11/04 01:30:19 chs Exp $    */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.130 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.131 2020/11/04 01:30:19 chs Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -100,8 +100,6 @@
 
 #define        UVMPD_NUMDIRTYREACTS    16
 
-#define        UVMPD_NUMTRYLOCKOWNER   128
-
 /*
  * local prototypes
  */
@@ -377,6 +375,32 @@
        mutex_spin_exit(&uvmpd_lock);
 }
 
+static krwlock_t *
+uvmpd_page_owner_lock(struct vm_page *pg)
+{
+       struct uvm_object *uobj = pg->uobject;
+       struct vm_anon *anon = pg->uanon;
+       krwlock_t *slock;
+
+       KASSERT(mutex_owned(&pg->interlock));
+
+#ifdef DEBUG
+       if (uobj == (void *)0xdeadbeef || anon == (void *)0xdeadbeef) {
+               return NULL;
+       }
+#endif
+       if (uobj != NULL) {
+               slock = uobj->vmobjlock;
+               KASSERTMSG(slock != NULL, "pg %p uobj %p, NULL lock", pg, uobj);
+       } else if (anon != NULL) {
+               slock = anon->an_lock;
+               KASSERTMSG(slock != NULL, "pg %p anon %p, NULL lock", pg, anon);
+       } else {
+               slock = NULL;
+       }
+       return slock;
+}
+
 /*
  * uvmpd_trylockowner: trylock the page's owner.
  *
@@ -388,71 +412,59 @@
 krwlock_t *
 uvmpd_trylockowner(struct vm_page *pg)
 {
-       struct uvm_object *uobj = pg->uobject;
-       struct vm_anon *anon = pg->uanon;
-       int tries, count;
-       bool running;
-       krwlock_t *slock;
+       krwlock_t *slock, *heldslock;
 
        KASSERT(mutex_owned(&pg->interlock));
 
-       if (uobj != NULL) {
-               slock = uobj->vmobjlock;
-               KASSERTMSG(slock != NULL, "pg %p uobj %p, NULL lock", pg, uobj);
-       } else if (anon != NULL) {
-               slock = anon->an_lock;
-               KASSERTMSG(slock != NULL, "pg %p anon %p, NULL lock", pg, anon);
-       } else {
+       slock = uvmpd_page_owner_lock(pg);
+       if (slock == NULL) {
                /* Page may be in state of flux - ignore. */
                mutex_exit(&pg->interlock);
                return NULL;
        }
 
-       /*
-        * Now try to lock the objects.  We'll try hard, but don't really
-        * plan on spending more than a millisecond or so here.
-        */
-       tries = (curlwp == uvm.pagedaemon_lwp ? UVMPD_NUMTRYLOCKOWNER : 1);
-       for (;;) {
-               if (rw_tryenter(slock, RW_WRITER)) {
-                       if (uobj == NULL) {
-                               /*
-                                * set PG_ANON if it isn't set already.
-                                */
-                               if ((pg->flags & PG_ANON) == 0) {
-                                       KASSERT(pg->loan_count > 0);
-                                       pg->loan_count--;
-                                       pg->flags |= PG_ANON;
-                                       /* anon now owns it */
-                               }
-                       }
-                       mutex_exit(&pg->interlock);
-                       return slock;
-               }
-               running = rw_owner_running(slock);
-               if (!running || --tries <= 0) {
-                       break;
-               }
-               count = SPINLOCK_BACKOFF_MAX;
-               SPINLOCK_BACKOFF(count);
+       if (rw_tryenter(slock, RW_WRITER)) {
+               goto success;
        }
 
        /*
-        * We didn't get the lock; chances are the very next page on the
-        * queue also has the same lock, so if the lock owner is not running
-        * take a breather and allow them to make progress.  There could be
-        * only 1 CPU in the system, or the pagedaemon could have preempted
-        * the owner in kernel, or any number of other things could be going
-        * on.
+        * The try-lock didn't work, so now do a blocking lock after
+        * dropping the page interlock.  Prevent the owner lock from
+        * being freed by taking a hold on it first.
         */
+
+       rw_obj_hold(slock);
        mutex_exit(&pg->interlock);
-       if (curlwp == uvm.pagedaemon_lwp) {
-               if (!running) {
-                       (void)kpause("pdpglock", false, 1, NULL);
+       rw_enter(slock, RW_WRITER);
+       heldslock = slock;
+
+       /*
+        * Now we hold some owner lock.  Check if the lock we hold
+        * is still the lock for the owner of the page.
+        * If it is then return it, otherwise release it and return NULL.
+        */
+
+       mutex_enter(&pg->interlock);
+       slock = uvmpd_page_owner_lock(pg);
+       if (heldslock != slock) {
+               rw_exit(heldslock);
+               slock = NULL;
+       }
+       rw_obj_free(heldslock);
+       if (slock != NULL) {
+success:
+               /*
+                * Set PG_ANON if it isn't set already.
+                */
+               if (pg->uobject == NULL && (pg->flags & PG_ANON) == 0) {
+                       KASSERT(pg->loan_count > 0);
+                       pg->loan_count--;
+                       pg->flags |= PG_ANON;
+                       /* anon now owns it */
                }
-               uvmexp.pdbusy++;
        }
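
For context, a caller in the pagedaemon consumes the result roughly as
follows.  This is a hedged, hypothetical sketch (the helper name is
invented, not from the tree), illustrating the contract: the function
always releases the page interlock, and a non-NULL return is a write
lock that the caller must rw_exit() when done with the page.

	static void
	scan_one_page_sketch(struct vm_page *pg)
	{
		krwlock_t *slock;

		mutex_enter(&pg->interlock);
		slock = uvmpd_trylockowner(pg);
		if (slock == NULL) {
			/* Owner in flux or changed underneath us: skip. */
			return;
		}
		/* Page's owner is stable here: clean, free, deactivate... */
		rw_exit(slock);
	}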


