Source-Changes-HG archive


[src/trunk]: src/sys/arch/x86 Allocate PV entries in PAGE_SIZE chunks, and ca...



details:   https://anonhg.NetBSD.org/src/rev/7cb542fb940c
branches:  trunk
changeset: 1008894:7cb542fb940c
user:      ad <ad%NetBSD.org@localhost>
date:      Sun Apr 05 00:21:11 2020 +0000

description:
Allocate PV entries in PAGE_SIZE chunks, and cache partially allocated PV
pages with the pmap.  Worth about 2-3% sys time on build.sh for me.
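
For readers skimming the change, here is a minimal, self-contained sketch of the underlying idea: carve a page-sized, page-aligned chunk into pv_entry slots, reserve the first slot for a small header that tracks the chunk's free list, and hand out entries from that list.  The names, the placeholder fields, and the posix_memalign() allocation are illustrative assumptions for a userland demo, not the kernel code; the real implementation is in the diff that follows.

/*
 * Sketch only (hypothetical userland demo): one page-aligned, page-sized
 * chunk holds a pv_page header in its first pv_entry-sized slot, followed
 * by PVE_PER_PVP free pv_entry slots.
 */
#include <stdlib.h>
#include <sys/queue.h>

#define PAGE_SIZE	4096			/* assumed page size */

struct pv_entry {
	LIST_ENTRY(pv_entry)	pve_list;	/* free-list linkage */
	void			*pve_ptp;	/* placeholder payload */
	unsigned long		pve_va;		/* placeholder payload */
};

struct pv_page {
	LIST_HEAD(, pv_entry)	pvp_pves;	/* free entries in this chunk */
	LIST_ENTRY(pv_page)	pvp_list;	/* linkage on an owner's list */
	long			pvp_nfree;	/* number of free entries */
};

/* one slot is sacrificed for the header, hence the "- 1" */
#define PVE_PER_PVP	((PAGE_SIZE / sizeof(struct pv_entry)) - 1)

/* the header must fit in the slot it steals from the chunk */
_Static_assert(sizeof(struct pv_page) <= sizeof(struct pv_entry),
    "pv_page header does not fit in a pv_entry slot");

static struct pv_page *
pvp_create(void)
{
	void *obj;
	struct pv_page *pvp;
	struct pv_entry *pve, *maxpve;

	/* page alignment lets the free path find the header by masking */
	if (posix_memalign(&obj, PAGE_SIZE, PAGE_SIZE) != 0)
		return NULL;

	pvp = obj;
	pve = (struct pv_entry *)obj + 1;	/* skip the header slot */
	maxpve = pve + PVE_PER_PVP;

	LIST_INIT(&pvp->pvp_pves);
	pvp->pvp_nfree = PVE_PER_PVP;
	for (; pve < maxpve; pve++)
		LIST_INSERT_HEAD(&pvp->pvp_pves, pve, pve_list);

	return pvp;
}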

diffstat:

 sys/arch/x86/include/pmap.h |    5 +-
 sys/arch/x86/x86/pmap.c     |  267 ++++++++++++++++++++++++++++---------------
 2 files changed, 175 insertions(+), 97 deletions(-)

diffs (truncated from 586 to 300 lines):

diff -r c18417176f58 -r 7cb542fb940c sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h       Sat Apr 04 23:58:54 2020 +0000
+++ b/sys/arch/x86/include/pmap.h       Sun Apr 05 00:21:11 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.116 2020/03/22 00:16:16 ad Exp $    */
+/*     $NetBSD: pmap.h,v 1.117 2020/04/05 00:21:11 ad Exp $    */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -259,6 +259,9 @@
                                        /* pointer to a PTP in our pmap */
        struct pmap_statistics pm_stats;  /* pmap stats */
        struct pv_entry *pm_pve;        /* spare pv_entry */
+       LIST_HEAD(, pv_page) pm_pvp_part;
+       LIST_HEAD(, pv_page) pm_pvp_empty;
+       LIST_HEAD(, pv_page) pm_pvp_full;
 
 #if !defined(__x86_64__)
        vaddr_t pm_hiexec;              /* highest executable mapping */
diff -r c18417176f58 -r 7cb542fb940c sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Sat Apr 04 23:58:54 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c   Sun Apr 05 00:21:11 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.380 2020/03/22 00:16:16 ad Exp $    */
+/*     $NetBSD: pmap.c,v 1.381 2020/04/05 00:21:11 ad Exp $    */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.380 2020/03/22 00:16:16 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.381 2020/04/05 00:21:11 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -371,9 +371,9 @@
 static void pmap_dtor(void *, void *);
 
 /*
- * pv_entry cache
+ * pv_page cache
  */
-static struct pool_cache pmap_pv_cache;
+static struct pool_cache pmap_pvp_cache;
 
 #ifdef __HAVE_DIRECT_MAP
 vaddr_t pmap_direct_base __read_mostly;
@@ -430,6 +430,20 @@
 };
 
 /*
+ * PV entries are allocated in page-sized chunks and cached per-pmap to
+ * avoid intense pressure on memory allocators.
+ */
+
+struct pv_page {
+       LIST_HEAD(, pv_entry)   pvp_pves;
+       LIST_ENTRY(pv_page)     pvp_list;
+       long                    pvp_nfree;
+       struct pmap             *pvp_pmap;
+};
+
+#define        PVE_PER_PVP     ((PAGE_SIZE / sizeof(struct pv_entry)) - 1)
+
+/*
  * PV tree prototypes
  */
 
@@ -472,9 +486,13 @@
 static void pmap_free_ptp(struct pmap *, struct vm_page *, vaddr_t,
     pt_entry_t *, pd_entry_t * const *);
 static bool pmap_remove_pte(struct pmap *, struct vm_page *, pt_entry_t *,
-    vaddr_t, struct pv_entry **);
+    vaddr_t);
 static void pmap_remove_ptes(struct pmap *, struct vm_page *, vaddr_t, vaddr_t,
-    vaddr_t, struct pv_entry **);
+    vaddr_t);
+static int pmap_pvp_ctor(void *, void *, int);
+static void pmap_pvp_dtor(void *, void *);
+static struct pv_entry *pmap_alloc_pv(struct pmap *);
+static void pmap_free_pv(struct pmap *, struct pv_entry *);
 
 static void pmap_alloc_level(struct pmap *, vaddr_t, long *);
 
@@ -1837,14 +1855,9 @@
        pool_init(&pmap_pdp_pool, PAGE_SIZE, 0, 0, flags,
            "pdppl", NULL, IPL_NONE);
 #endif
-       pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry),
-#ifdef _LP64
-           coherency_unit,
-#else
-           coherency_unit / 2,
-#endif
-            0, PR_LARGECACHE, "pvpl", &pool_allocator_kmem,
-           IPL_NONE, NULL, NULL, NULL);
+       pool_cache_bootstrap(&pmap_pvp_cache, PAGE_SIZE, PAGE_SIZE,
+            0, 0, "pvpage", &pool_allocator_kmem,
+           IPL_NONE, pmap_pvp_ctor, pmap_pvp_dtor, NULL);
 
        pmap_tlb_init();
 
@@ -1947,23 +1960,109 @@
  */
 
 /*
- * pmap_free_pvs: free a linked list of pv entries.  the pv entries have
- * been removed from their respective pages, but are still entered into the
- * map and we must undo that.
- *
- * => must be called with pmap locked.
+ * pmap_pvp_ctor: pool_cache constructor for PV pages.
+ */
+static int
+pmap_pvp_ctor(void *arg, void *obj, int flags)
+{
+       struct pv_page *pvp = (struct pv_page *)obj;
+       struct pv_entry *pve = (struct pv_entry *)obj + 1;
+       struct pv_entry *maxpve = pve + PVE_PER_PVP;
+
+       KASSERT(sizeof(struct pv_page) <= sizeof(struct pv_entry));
+       KASSERT(trunc_page((vaddr_t)obj) == (vaddr_t)obj);
+
+       LIST_INIT(&pvp->pvp_pves);
+       pvp->pvp_nfree = PVE_PER_PVP;
+       pvp->pvp_pmap = NULL;
+
+       for (; pve < maxpve; pve++) {
+               LIST_INSERT_HEAD(&pvp->pvp_pves, pve, pve_list);
+       }
+
+       return 0;
+}
+
+/*
+ * pmap_pvp_dtor: pool_cache destructor for PV pages.
  */
 static void
-pmap_free_pvs(struct pmap *pmap, struct pv_entry *pve)
+pmap_pvp_dtor(void *arg, void *obj)
 {
-       struct pv_entry *next;
+       struct pv_page *pvp __diagused = obj;
+
+       KASSERT(pvp->pvp_pmap == NULL);
+       KASSERT(pvp->pvp_nfree == PVE_PER_PVP);
+}
+
+/*
+ * pmap_alloc_pv: allocate a PV entry (likely cached with pmap).
+ */
+static struct pv_entry *
+pmap_alloc_pv(struct pmap *pmap)
+{
+       struct pv_entry *pve;
+       struct pv_page *pvp;
 
        KASSERT(mutex_owned(&pmap->pm_lock));
 
-       for ( /* null */ ; pve != NULL ; pve = next) {
-               next = pve->pve_next;
-               pool_cache_put(&pmap_pv_cache, pve);
-       }
+       if (__predict_false((pvp = LIST_FIRST(&pmap->pm_pvp_part)) == NULL)) {
+               if ((pvp = LIST_FIRST(&pmap->pm_pvp_full)) != NULL) {
+                       LIST_REMOVE(pvp, pvp_list);
+               } else {
+                       pvp = pool_cache_get(&pmap_pvp_cache, PR_NOWAIT);
+               }
+               if (__predict_false(pvp == NULL)) {
+                       return NULL;
+               }
+               /* full -> part */
+               LIST_INSERT_HEAD(&pmap->pm_pvp_part, pvp, pvp_list);
+               pvp->pvp_pmap = pmap;
+       }
+
+       KASSERT(pvp->pvp_pmap == pmap);
+       KASSERT(pvp->pvp_nfree > 0);
+
+       pve = LIST_FIRST(&pvp->pvp_pves);
+       LIST_REMOVE(pve, pve_list);
+       pvp->pvp_nfree--;
+
+       if (__predict_false(pvp->pvp_nfree == 0)) {
+               /* part -> empty */
+               KASSERT(LIST_EMPTY(&pvp->pvp_pves));
+               LIST_REMOVE(pvp, pvp_list);
+               LIST_INSERT_HEAD(&pmap->pm_pvp_empty, pvp, pvp_list);
+       } else {
+               KASSERT(!LIST_EMPTY(&pvp->pvp_pves));
+       }
+
+       return pve;
+}
+
+/*
+ * pmap_free_pv: delayed free of a PV entry.
+ */
+static void
+pmap_free_pv(struct pmap *pmap, struct pv_entry *pve)
+{
+       struct pv_page *pvp = (struct pv_page *)trunc_page((vaddr_t)pve);
+
+       KASSERT(mutex_owned(&pmap->pm_lock));
+       KASSERT(pvp->pvp_pmap == pmap);
+       KASSERT(pvp->pvp_nfree >= 0);
+
+       LIST_INSERT_HEAD(&pvp->pvp_pves, pve, pve_list);
+       pvp->pvp_nfree++;
+
+       if (__predict_false(pvp->pvp_nfree == 1)) {
+               /* empty -> part */
+               LIST_REMOVE(pvp, pvp_list);
+               LIST_INSERT_HEAD(&pmap->pm_pvp_part, pvp, pvp_list);
+       } else if (__predict_false(pvp->pvp_nfree == PVE_PER_PVP)) {
+               /* part -> full */
+               LIST_REMOVE(pvp, pvp_list);
+               LIST_INSERT_HEAD(&pmap->pm_pvp_full, pvp, pvp_list);
+       } 
 }
 
 /*
@@ -2129,7 +2228,7 @@
         * case it's needed; won't know for sure until the lock is taken.
         */
        if (pmap->pm_pve == NULL) {
-               pmap->pm_pve = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
+               pmap->pm_pve = pmap_alloc_pv(pmap);
        }
 
        error = 0;
@@ -2171,7 +2270,7 @@
  * pmap_remove_pv: try to remove a mapping from a pv_list
  *
  * => pmap must be locked
- * => removes dynamic entries from tree
+ * => removes dynamic entries from tree and frees them
  * => caller should adjust ptp's wire_count and free PTP if needed
  */
 static void
@@ -2213,6 +2312,7 @@
 #ifdef DIAGNOSTIC
                memset(pve, 0, sizeof(*pve));
 #endif
+               pmap_free_pv(pmap, pve);
        }
 
        KASSERT(pmap_treelookup_pv(pmap, ptp, tree, va) == NULL);
@@ -2670,6 +2770,9 @@
 #endif
        LIST_INIT(&pmap->pm_gc_ptp);
        pmap->pm_pve = NULL;
+       LIST_INIT(&pmap->pm_pvp_full);
+       LIST_INIT(&pmap->pm_pvp_part);
+       LIST_INIT(&pmap->pm_pvp_empty);
 
        /* allocate and init PDP */
        pmap->pm_pdir = pool_get(&pmap_pdp_pool, PR_WAITOK);
@@ -2702,10 +2805,6 @@
 {
        struct pmap *pmap = obj;
 
-       if (pmap->pm_pve != NULL) {
-               pool_cache_put(&pmap_pv_cache, pmap->pm_pve);
-       }
-
        mutex_enter(&pmaps_lock);
        LIST_REMOVE(pmap, pm_list);
        mutex_exit(&pmaps_lock);
@@ -2832,13 +2931,16 @@
        pmap_check_inuse(pmap);
 
        /*
-        * XXX handle deferred PTP page free for EPT.  ordinarily this is
-        * taken care of by pmap_remove_all().  once shared with EPT this
-        * can go away.
+        * handle any deferred frees.
         */
-       if (__predict_false(!LIST_EMPTY(&pmap->pm_gc_ptp))) {
-               pmap_update(pmap);
-       }
+
+       if (pmap->pm_pve != NULL) {
+               mutex_enter(&pmap->pm_lock);
+               pmap_free_pv(pmap, pmap->pm_pve);
+               mutex_exit(&pmap->pm_lock);
+               pmap->pm_pve = NULL;
+       }
+       pmap_update(pmap);
 
        /*
         * Reference count is zero, free pmap resources and then free pmap.
@@ -2874,6 +2976,10 @@
        kcpuset_zero(pmap->pm_xen_ptp_cpus);
 #endif
 
+       KASSERT(LIST_EMPTY(&pmap->pm_pvp_full));
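
To round out the sketch given before the diff (same hypothetical names, not the kernel code), the allocation and free paths below use a pvp_cache structure standing in for the per-pmap pm_pvp_part/pm_pvp_empty/pm_pvp_full lists.  Allocation always starts from a partially used chunk, and free recovers the owning chunk by masking the entry's address down to a page boundary; as a chunk's free count changes it migrates between the three lists, so wholly free chunks can later be returned to the backing pool.

/*
 * Continuation of the earlier sketch (hypothetical names, not the kernel
 * code).  Chunks move part <-> empty <-> full as entries are handed out
 * and given back.
 */
#include <stdint.h>

struct pvp_cache {
	LIST_HEAD(, pv_page)	part;	/* some entries free */
	LIST_HEAD(, pv_page)	empty;	/* no entries free */
	LIST_HEAD(, pv_page)	full;	/* all entries free */
};

static struct pv_entry *
pvp_alloc(struct pvp_cache *c)
{
	struct pv_page *pvp;
	struct pv_entry *pve;

	if ((pvp = LIST_FIRST(&c->part)) == NULL) {
		/* reuse a fully free chunk before creating a new one */
		if ((pvp = LIST_FIRST(&c->full)) != NULL)
			LIST_REMOVE(pvp, pvp_list);
		else if ((pvp = pvp_create()) == NULL)
			return NULL;
		LIST_INSERT_HEAD(&c->part, pvp, pvp_list);
	}

	pve = LIST_FIRST(&pvp->pvp_pves);
	LIST_REMOVE(pve, pve_list);
	if (--pvp->pvp_nfree == 0) {
		/* part -> empty: chunk has no free entries left */
		LIST_REMOVE(pvp, pvp_list);
		LIST_INSERT_HEAD(&c->empty, pvp, pvp_list);
	}
	return pve;
}

static void
pvp_free(struct pvp_cache *c, struct pv_entry *pve)
{
	/* the chunk header sits at the start of the entry's page */
	struct pv_page *pvp = (struct pv_page *)
	    ((uintptr_t)pve & ~((uintptr_t)PAGE_SIZE - 1));

	LIST_INSERT_HEAD(&pvp->pvp_pves, pve, pve_list);
	if (++pvp->pvp_nfree == 1) {
		/* empty -> part: chunk has a free entry again */
		LIST_REMOVE(pvp, pvp_list);
		LIST_INSERT_HEAD(&c->part, pvp, pvp_list);
	} else if (pvp->pvp_nfree == (long)PVE_PER_PVP) {
		/* part -> full: whole chunk free; could be released */
		LIST_REMOVE(pvp, pvp_list);
		LIST_INSERT_HEAD(&c->full, pvp, pvp_list);
	}
}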


