Source-Changes-HG archive


[src/trunk]: src/sys/uvm pagedaemon:

details:   https://anonhg.NetBSD.org/src/rev/03df87768c80
branches:  trunk
changeset: 466674:03df87768c80
user:      ad <ad%NetBSD.org@localhost>
date:      Mon Dec 30 18:08:37 2019 +0000

description:
pagedaemon:

- Use marker pages to keep our place in the queue when scanning, rather
  than relying on the saved next-page pointer still being on the inactive
  queue when the scan resumes (see the sketch after this list).

- In uvmpdpol_balancequeue(), lock the object once instead of twice.

- When draining pools the situation is already desperate, but try to avoid
  saturating the system with xcall, lock and interrupt activity: sleep for
  1 clock tick if being continually awoken and all pools have been cycled
  through at least once (see the drain-loop sketch below).

- Pause & resume the freelist cache during pool draining.

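As an illustration of the marker technique, here is a minimal user-space
sketch built on the same <sys/queue.h> TAILQ macros the kernel uses.  The
names (struct page, scan_begin(), scan_next(), scan_end()) are invented for
the example and are not the UVM API; the real code keeps a PG_MARKER page
on pdpol_state.s_inactiveq under s->lock, as the diff below shows.

/*
 * marker_sketch.c: user-space sketch of scanning with a marker page.
 */
#include <sys/queue.h>
#include <stdio.h>

#define PG_MARKER 0x01		/* illustrative flag, not the kernel value */

struct page {
	int flags;
	int id;
	TAILQ_ENTRY(page) pdqueue;
};

TAILQ_HEAD(pagequeue, page);

/* begin a scan: park the marker at the head of the queue. */
static void
scan_begin(struct pagequeue *q, struct page *marker)
{
	marker->flags = PG_MARKER;
	TAILQ_INSERT_HEAD(q, marker, pdqueue);
}

/*
 * fetch the page after the marker, then advance the marker past it.
 * because the marker itself occupies a slot in the queue, concurrent
 * removals and re-queues of real pages never invalidate our position
 * (the kernel does all of this under s->lock).
 */
static struct page *
scan_next(struct pagequeue *q, struct page *marker)
{
	struct page *pg = TAILQ_NEXT(marker, pdqueue);

	if (pg == NULL)
		return NULL;
	TAILQ_REMOVE(q, marker, pdqueue);
	TAILQ_INSERT_AFTER(q, pg, marker, pdqueue);
	return pg;
}

/* end the scan: take the marker back out. */
static void
scan_end(struct pagequeue *q, struct page *marker)
{
	TAILQ_REMOVE(q, marker, pdqueue);
}

int
main(void)
{
	struct pagequeue q = TAILQ_HEAD_INITIALIZER(q);
	struct page pages[4], marker, *pg;

	for (int i = 0; i < 4; i++) {
		pages[i].flags = 0;
		pages[i].id = i;
		TAILQ_INSERT_TAIL(&q, &pages[i], pdqueue);
	}
	scan_begin(&q, &marker);
	while ((pg = scan_next(&q, &marker)) != NULL)
		printf("visited page %d\n", pg->id);
	scan_end(&q, &marker);
	return 0;
}

Because the marker is itself an element of the queue, other threads can
remove or re-queue real pages around it without losing the scan's position;
the old ss_nextpg scheme had to restart from the head of the queue whenever
its saved page left the inactive queue.
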
PR kern/54209: NetBSD 8 large memory performance extremely low
PR kern/54210: NetBSD-8 processes presumably not exiting
PR kern/54727: writing a large file causes unreasonable system behaviour

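The rate-limiting and freelist-cache points come together in
uvmpd_pool_drain_thread().  Below is a hedged user-space sketch of the same
cycle-detection pattern using POSIX threads: drain_one() is a stand-in for
pool_drain() (which drains one pool per call, round-robin, and reports
which), a 10 ms nanosleep() models the one-tick kpause(), and the
uvm_pgflcache_pause()/uvm_pgflcache_resume() bracketing appears only as a
comment.  The kernel additionally resets the cycle state whenever the
thread actually slept (hardclock_ticks changed across the cv_wait); that
wrinkle is omitted here.

/*
 * pooldrain_sketch.c: user-space sketch of the rate-limited drain loop.
 * Build with: cc pooldrain_sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

#define NPOOLS 8	/* pretend the system has 8 pools */

static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drain_cv = PTHREAD_COND_INITIALIZER;
static bool drain_run;

/* stand-in for pool_drain(): drain one pool, round-robin, report which. */
static int
drain_one(void)
{
	static int nextpool;
	int pool = nextpool;

	nextpool = (nextpool + 1) % NPOOLS;
	return pool;
}

static void *
drain_thread(void *arg)
{
	int firstpool = -1, curpool;
	bool cycled = true;

	for (;;) {
		/* sleep until awoken, like uvmpd_pool_drain_cv. */
		pthread_mutex_lock(&drain_lock);
		while (!drain_run)
			pthread_cond_wait(&drain_cv, &drain_lock);
		drain_run = false;
		pthread_mutex_unlock(&drain_lock);

		/*
		 * rate limit: once every pool has been visited since the
		 * last pause, back off briefly rather than spinning.
		 */
		if (cycled) {
			struct timespec ts = { 0, 10 * 1000 * 1000 };
			nanosleep(&ts, NULL);
			cycled = false;
			firstpool = -1;
		}

		/*
		 * in the kernel, uvm_pgflcache_pause() brackets the drain
		 * here and uvm_pgflcache_resume() follows it, so that
		 * freed pages reach the global freelists.
		 */
		curpool = drain_one();
		if (firstpool == -1)
			firstpool = curpool;	/* remember where we started */
		else if (firstpool == curpool)
			cycled = true;		/* wrapped: full cycle done */
	}
	return arg;	/* NOTREACHED */
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, drain_thread, NULL);
	for (int i = 0; i < 32; i++) {	/* simulate pagedaemon wakeups */
		pthread_mutex_lock(&drain_lock);
		drain_run = true;
		pthread_cond_signal(&drain_cv);
		pthread_mutex_unlock(&drain_lock);
	}
	return 0;	/* exiting main terminates the demo thread */
}

The design point: since pool_drain() visits one pool per call in order,
seeing the first-drained pool again means every pool has been cycled
through once, and only then does the thread pause, so the backoff never
delays the first pass over the pools.
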
diffstat:

 sys/uvm/uvm_pdaemon.c           |   93 +++++++++++++---------------
 sys/uvm/uvm_pdaemon.h           |    6 +-
 sys/uvm/uvm_pdpolicy.h          |    3 +-
 sys/uvm/uvm_pdpolicy_clock.c    |  126 +++++++++++++++++++++++++--------------
 sys/uvm/uvm_pdpolicy_clockpro.c |   37 ++++++++++-
 5 files changed, 159 insertions(+), 106 deletions(-)

diffs (truncated from 473 to 300 lines):

diff -r 7782638fa43c -r 03df87768c80 sys/uvm/uvm_pdaemon.c
--- a/sys/uvm/uvm_pdaemon.c     Mon Dec 30 17:47:06 2019 +0000
+++ b/sys/uvm/uvm_pdaemon.c     Mon Dec 30 18:08:37 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_pdaemon.c,v 1.118 2019/12/21 16:10:20 ad Exp $     */
+/*     $NetBSD: uvm_pdaemon.c,v 1.119 2019/12/30 18:08:37 ad Exp $     */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.118 2019/12/21 16:10:20 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.119 2019/12/30 18:08:37 ad Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -83,6 +83,7 @@
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_pdpolicy.h>
+#include <uvm/uvm_pgflcache.h>
 
 #ifdef UVMHIST
 UVMHIST_DEFINE(pdhist);
@@ -598,7 +599,7 @@
  * => return true if a page had an associated slot.
  */
 
-static bool
+bool
 uvmpd_dropswap(struct vm_page *pg)
 {
        bool result = false;
@@ -622,50 +623,6 @@
        return result;
 }
 
-/*
- * uvmpd_trydropswap: try to free any swap allocated to this page.
- *
- * => return true if a slot is successfully freed.
- * => page interlock must be held, and will be dropped.
- */
-
-bool
-uvmpd_trydropswap(struct vm_page *pg)
-{
-       kmutex_t *slock;
-       bool result;
-
-       if ((pg->flags & PG_BUSY) != 0) {
-               mutex_exit(&pg->interlock);
-               return false;
-       }
-
-       /*
-        * lock the page's owner.
-        * this will drop pg->interlock.
-        */
-
-       slock = uvmpd_trylockowner(pg);
-       if (slock == NULL) {
-               return false;
-       }
-
-       /*
-        * skip this page if it's busy.
-        */
-
-       if ((pg->flags & PG_BUSY) != 0) {
-               mutex_exit(slock);
-               return false;
-       }
-
-       result = uvmpd_dropswap(pg);
-
-       mutex_exit(slock);
-
-       return result;
-}
-
 #endif /* defined(VMSWAP) */
 
 /*
@@ -909,6 +866,8 @@
 #endif /* defined(VMSWAP) */
        }
 
+       uvmpdpol_scanfini();
+
 #if defined(VMSWAP)
        swapcluster_flush(&swc, true);
 #endif /* defined(VMSWAP) */
@@ -1031,17 +990,44 @@
 static void
 uvmpd_pool_drain_thread(void *arg)
 {
-       int bufcnt;
+       struct pool *firstpool, *curpool;
+       int bufcnt, lastslept;
+       bool cycled;
 
+       firstpool = NULL;
+       cycled = true;
        for (;;) {
+               /*
+                * sleep until awoken by the pagedaemon.
+                */
                mutex_enter(&uvmpd_lock);
                if (!uvmpd_pool_drain_run) {
+                       lastslept = hardclock_ticks;
                        cv_wait(&uvmpd_pool_drain_cv, &uvmpd_lock);
+                       if (hardclock_ticks != lastslept) {
+                               cycled = false;
+                               firstpool = NULL;
+                       }
                }
                uvmpd_pool_drain_run = false;
                mutex_exit(&uvmpd_lock);
 
                /*
+                * rate limit draining, otherwise in desperate circumstances
+                * this can totally saturate the system with xcall activity.
+                */
+               if (cycled) {
+                       kpause("uvmpdlmt", false, 1, NULL);
+                       cycled = false;
+                       firstpool = NULL;
+               }
+
+               /*
+                * drain and temporarily disable the freelist cache.
+                */
+               uvm_pgflcache_pause();
+
+               /*
                 * kill unused metadata buffers.
                 */
                bufcnt = uvmexp.freetarg - uvm_free();
@@ -1053,9 +1039,16 @@
                mutex_exit(&bufcache_lock);
 
                /*
-                * drain a pool.
+                * drain a pool, and then re-enable the freelist cache. 
                 */
-               pool_drain(NULL);
+               (void)pool_drain(&curpool);
+               KASSERT(curpool != NULL);
+               if (firstpool == NULL) {
+                       firstpool = curpool;
+               } else if (firstpool == curpool) {
+                       cycled = true;
+               }
+               uvm_pgflcache_resume();
        }
        /*NOTREACHED*/
 }
diff -r 7782638fa43c -r 03df87768c80 sys/uvm/uvm_pdaemon.h
--- a/sys/uvm/uvm_pdaemon.h     Mon Dec 30 17:47:06 2019 +0000
+++ b/sys/uvm/uvm_pdaemon.h     Mon Dec 30 18:08:37 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_pdaemon.h,v 1.17 2011/02/02 15:25:27 chuck Exp $   */
+/*     $NetBSD: uvm_pdaemon.h,v 1.18 2019/12/30 18:08:38 ad Exp $      */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -79,9 +79,9 @@
 
 kmutex_t *uvmpd_trylockowner(struct vm_page *);
 #ifdef VMSWAP
-bool uvmpd_trydropswap(struct vm_page *);
+bool uvmpd_dropswap(struct vm_page *);
 #else
-#define uvmpd_trydropswap(_a_) (/*CONSTCOND*/false)
+#define uvmpd_dropswap(_a_) (/*CONSTCOND*/false)
 #endif
 
 #endif /* _KERNEL */
diff -r 7782638fa43c -r 03df87768c80 sys/uvm/uvm_pdpolicy.h
--- a/sys/uvm/uvm_pdpolicy.h    Mon Dec 30 17:47:06 2019 +0000
+++ b/sys/uvm/uvm_pdpolicy.h    Mon Dec 30 18:08:37 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_pdpolicy.h,v 1.4 2019/12/13 20:10:22 ad Exp $      */
+/*     $NetBSD: uvm_pdpolicy.h,v 1.5 2019/12/30 18:08:38 ad Exp $      */
 
 /*-
  * Copyright (c)2005, 2006 YAMAMOTO Takashi,
@@ -51,6 +51,7 @@
 
 void uvmpdpol_tune(void);
 void uvmpdpol_scaninit(void);
+void uvmpdpol_scanfini(void);
 struct vm_page *uvmpdpol_selectvictim(kmutex_t **lock);
 void uvmpdpol_balancequeue(int);
 
diff -r 7782638fa43c -r 03df87768c80 sys/uvm/uvm_pdpolicy_clock.c
--- a/sys/uvm/uvm_pdpolicy_clock.c      Mon Dec 30 17:47:06 2019 +0000
+++ b/sys/uvm/uvm_pdpolicy_clock.c      Mon Dec 30 18:08:37 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_pdpolicy_clock.c,v 1.23 2019/12/27 13:13:17 ad Exp $       */
+/*     $NetBSD: uvm_pdpolicy_clock.c,v 1.24 2019/12/30 18:08:38 ad Exp $       */
 /*     NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $     */
 
 /*
@@ -69,7 +69,7 @@
 #else /* defined(PDSIM) */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.23 2019/12/27 13:13:17 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.24 2019/12/30 18:08:38 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -110,9 +110,8 @@
 };
 
 struct uvmpdpol_scanstate {
-       bool ss_first;
        bool ss_anonreact, ss_filereact, ss_execreact;
-       struct vm_page *ss_nextpg;
+       struct vm_page ss_marker;
 };
 
 static void    uvmpdpol_pageactivate_locked(struct vm_page *);
@@ -177,8 +176,20 @@
        ss->ss_anonreact = anonreact;
        ss->ss_filereact = filereact;
        ss->ss_execreact = execreact;
+       memset(&ss->ss_marker, 0, sizeof(ss->ss_marker));
+       ss->ss_marker.flags = PG_MARKER;
+       TAILQ_INSERT_HEAD(&pdpol_state.s_inactiveq, &ss->ss_marker, pdqueue);
+       mutex_exit(&s->lock);
+}
 
-       ss->ss_first = true;
+void
+uvmpdpol_scanfini(void)
+{
+       struct uvmpdpol_globalstate *s = &pdpol_state;
+       struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
+
+       mutex_enter(&s->lock);
+       TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker, pdqueue);
        mutex_exit(&s->lock);
 }
 
@@ -195,19 +206,11 @@
                struct vm_anon *anon;
                struct uvm_object *uobj;
 
-               if (ss->ss_first) {
-                       pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
-                       ss->ss_first = false;
-               } else {
-                       pg = ss->ss_nextpg;
-                       if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
-                               pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
-                       }
-               }
+               pg = TAILQ_NEXT(&ss->ss_marker, pdqueue);
                if (pg == NULL) {
                        break;
                }
-               ss->ss_nextpg = TAILQ_NEXT(pg, pdqueue);
+               KASSERT((pg->flags & PG_MARKER) == 0);
                uvmexp.pdscans++;
 
                /*
@@ -225,6 +228,14 @@
                }
 
                /*
+                * now prepare to move on to the next page.
+                */
+               TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker,
+                   pdqueue);
+               TAILQ_INSERT_AFTER(&pdpol_state.s_inactiveq, pg,
+                   &ss->ss_marker, pdqueue);
+
+               /*
                 * enforce the minimum thresholds on different
                 * types of memory usage.  if reusing the current
                 * page would reduce that type of usage below its
@@ -300,7 +311,7 @@
 {
        struct uvmpdpol_globalstate *s = &pdpol_state;
        int inactive_shortage;
-       struct vm_page *p, *nextpg;
+       struct vm_page *p, marker;
        kmutex_t *lock;
 
        /*
@@ -308,34 +319,22 @@
         * our inactive target.
         */
 
-       mutex_enter(&s->lock);


