Source-Changes-HG archive


[src/trunk]: src/sys/arch/x86 - pmap_check_inuse() is expensive so make it DEBUG, not DIAGNOSTIC



details:   https://anonhg.NetBSD.org/src/rev/4403ed553577
branches:  trunk
changeset: 745751:4403ed553577
user:      ad <ad%NetBSD.org@localhost>
date:      Tue Mar 10 22:38:41 2020 +0000

description:
- pmap_check_inuse() is expensive so make it DEBUG, not DIAGNOSTIC (see
  the first sketch after this list).

- Put PV locking back in place with only a minor performance impact (see
  the second sketch below).  pmap_enter() still needs more work - it's
  not easy to satisfy all the competing requirements, so I'll do that
  with another change.

- Use pmap_find_ptp() (lookup only) in preference to pmap_get_ptp()
  (alloc).  Make pm_ptphint indexed by VA, not PA.  Replace the per-pmap
  radix tree for dynamic PV entries with a per-PTP rbtree (see the third
  sketch below).  Cuts system time during a kernel build by ~10% for me.

diffstat:

 sys/arch/x86/include/pmap.h    |    5 +-
 sys/arch/x86/include/pmap_pv.h |   42 ++-
 sys/arch/x86/x86/pmap.c        |  500 +++++++++++++++++++++++-----------------
 3 files changed, 318 insertions(+), 229 deletions(-)

diffs (truncated from 1145 to 300 lines):

diff -r f24d28616f98 -r 4403ed553577 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h       Tue Mar 10 15:58:36 2020 +0000
+++ b/sys/arch/x86/include/pmap.h       Tue Mar 10 22:38:41 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.110 2020/02/23 15:46:39 ad Exp $    */
+/*     $NetBSD: pmap.h,v 1.111 2020/03/10 22:38:41 ad Exp $    */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,8 +67,6 @@
 #ifndef _X86_PMAP_H_
 #define        _X86_PMAP_H_
 
-#include <sys/radixtree.h>
-
 /*
  * pl*_pi: index in the ptp page for a pde mapping a VA.
  * (pl*_i below is the index in the virtual array of all pdes per level)
@@ -257,7 +255,6 @@
        paddr_t pm_pdirpa[PDP_SIZE];    /* PA of PDs (read-only after create) */
        struct vm_page *pm_ptphint[PTP_LEVELS-1];
                                        /* pointer to a PTP in our pmap */
-       struct radix_tree pm_pvtree;    /* tree of non-embedded pv entries */
        struct pmap_statistics pm_stats;  /* pmap stats */
 
 #if !defined(__x86_64__)
diff -r f24d28616f98 -r 4403ed553577 sys/arch/x86/include/pmap_pv.h
--- a/sys/arch/x86/include/pmap_pv.h    Tue Mar 10 15:58:36 2020 +0000
+++ b/sys/arch/x86/include/pmap_pv.h    Tue Mar 10 22:38:41 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap_pv.h,v 1.12 2020/02/23 22:28:53 ad Exp $  */
+/*     $NetBSD: pmap_pv.h,v 1.13 2020/03/10 22:38:41 ad Exp $  */
 
 /*-
  * Copyright (c)2008 YAMAMOTO Takashi,
@@ -31,6 +31,7 @@
 
 #include <sys/mutex.h>
 #include <sys/queue.h>
+#include <sys/rbtree.h>
 
 struct vm_page;
 
@@ -56,6 +57,8 @@
 struct pv_entry {
        struct pv_pte pve_pte;          /* should be the first member */
        LIST_ENTRY(pv_entry) pve_list;  /* on pmap_page::pp_pvlist */
+       rb_node_t pve_rb;               /* red-black tree node */
+       uintptr_t pve_padding;          /* unused */
 };
 #define        pve_next        pve_list.le_next
 
@@ -65,26 +68,43 @@
 
 struct pmap_page {
        union {
-               /* PP_EMBEDDED */
-               struct pv_pte u_pte;
+               /* PTPs */
+               rb_tree_t rb;
 
                /* PTPs */
-               LIST_ENTRY(vm_page) u_link;
+               LIST_ENTRY(vm_page) link;
+
+               /* Non-PTPs */
+               struct {
+                       /* PP_EMBEDDED */
+                       struct pv_pte pte;
+
+                       LIST_HEAD(, pv_entry) pvlist;
+                       uint8_t flags;
+                       uint8_t attrs;
+               } s;
        } pp_u;
-       LIST_HEAD(, pv_entry) pp_pvlist;
-#define        pp_pte  pp_u.u_pte
-#define        pp_link pp_u.u_link
-       uint8_t pp_flags;
-       uint8_t pp_attrs;
+       kmutex_t        pp_lock;
+#define        pp_rb           pp_u.rb
+#define        pp_link         pp_u.link
+#define        pp_pte          pp_u.s.pte
+#define pp_pvlist      pp_u.s.pvlist
+#define        pp_pflags       pp_u.s.flags
+#define        pp_attrs        pp_u.s.attrs
+};
+
 #define PP_ATTRS_D     0x01    /* Dirty */
 #define PP_ATTRS_A     0x02    /* Accessed */
 #define PP_ATTRS_W     0x04    /* Writable */
-};
 
 /* pp_flags */
 #define        PP_EMBEDDED     1
 #define        PP_FREEING      2
 
-#define        PMAP_PAGE_INIT(pp)      LIST_INIT(&(pp)->pp_pvlist)
+#define        PMAP_PAGE_INIT(pp) \
+do { \
+       LIST_INIT(&(pp)->pp_pvlist); \
+       mutex_init(&(pp)->pp_lock, MUTEX_NODEBUG, IPL_VM); \
+} while (/* CONSTCOND */ 0);
 
 #endif /* !_X86_PMAP_PV_H_ */
diff -r f24d28616f98 -r 4403ed553577 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Tue Mar 10 15:58:36 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c   Tue Mar 10 22:38:41 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.362 2020/03/04 22:00:03 ad Exp $    */
+/*     $NetBSD: pmap.c,v 1.363 2020/03/10 22:38:41 ad Exp $    */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.362 2020/03/04 22:00:03 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.363 2020/03/10 22:38:41 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -293,6 +293,7 @@
 
 static struct pmap kernel_pmap_store __cacheline_aligned; /* kernel's pmap */
 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
+static rb_tree_t pmap_kernel_rb __cacheline_aligned;
 
 struct bootspace bootspace __read_mostly;
 struct slotspace slotspace __read_mostly;
@@ -409,6 +410,21 @@
 };
 
 /*
+ * PV tree prototypes
+ */
+
+static int     pmap_compare_key(void *, const void *, const void *);
+static int     pmap_compare_nodes(void *, const void *, const void *);
+
+/* Red-black tree */
+static const rb_tree_ops_t pmap_rbtree_ops = {
+       .rbto_compare_nodes = pmap_compare_nodes,
+       .rbto_compare_key = pmap_compare_key,
+       .rbto_node_offset = offsetof(struct pv_entry, pve_rb),
+       .rbto_context = NULL
+};
+
+/*
  * Local prototypes
  */
 
@@ -431,7 +447,7 @@
 static void pmap_unget_ptp(struct pmap *, struct pmap_ptparray *);
 static void pmap_install_ptp(struct pmap *, struct pmap_ptparray *, vaddr_t,
     pd_entry_t * const *);
-static struct vm_page *pmap_find_ptp(struct pmap *, vaddr_t, paddr_t, int);
+static struct vm_page *pmap_find_ptp(struct pmap *, vaddr_t, int);
 static void pmap_freepage(struct pmap *, struct vm_page *, int);
 static void pmap_free_ptp(struct pmap *, struct vm_page *, vaddr_t,
     pt_entry_t *, pd_entry_t * const *);
@@ -440,10 +456,6 @@
 static void pmap_remove_ptes(struct pmap *, struct vm_page *, vaddr_t, vaddr_t,
     vaddr_t, struct pv_entry **);
 
-static int pmap_pvmap_insert(struct pmap *, vaddr_t, struct pv_entry *);
-static struct pv_entry *pmap_pvmap_lookup(struct pmap *, vaddr_t);
-static void pmap_pvmap_remove(struct pmap *, vaddr_t, struct pv_entry *);
-
 static void pmap_alloc_level(struct pmap *, vaddr_t, long *);
 
 static void pmap_load1(struct lwp *, struct pmap *, struct pmap *);
@@ -517,7 +529,8 @@
 pv_pte_first(struct pmap_page *pp)
 {
 
-       if ((pp->pp_flags & PP_EMBEDDED) != 0) {
+       KASSERT(mutex_owned(&pp->pp_lock));
+       if ((pp->pp_pflags & PP_EMBEDDED) != 0) {
                return &pp->pp_pte;
        }
        return pve_to_pvpte(LIST_FIRST(&pp->pp_pvlist));
@@ -527,9 +540,10 @@
 pv_pte_next(struct pmap_page *pp, struct pv_pte *pvpte)
 {
 
+       KASSERT(mutex_owned(&pp->pp_lock));
        KASSERT(pvpte != NULL);
        if (pvpte == &pp->pp_pte) {
-               KASSERT((pp->pp_flags & PP_EMBEDDED) != 0);
+               KASSERT((pp->pp_pflags & PP_EMBEDDED) != 0);
                return pve_to_pvpte(LIST_FIRST(&pp->pp_pvlist));
        }
        return pve_to_pvpte(LIST_NEXT(pvpte_to_pve(pvpte), pve_list));
@@ -553,6 +567,44 @@
 }
 
 /*
+ * rbtree: compare two nodes.
+ */
+static int
+pmap_compare_nodes(void *context, const void *n1, const void *n2)
+{
+       const struct pv_entry *pve1 = n1;
+       const struct pv_entry *pve2 = n2;
+
+       KASSERT(pve1->pve_pte.pte_ptp == pve2->pve_pte.pte_ptp);
+
+       if (pve1->pve_pte.pte_va < pve2->pve_pte.pte_va) {
+               return -1;
+       }
+       if (pve1->pve_pte.pte_va > pve2->pve_pte.pte_va) {
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * rbtree: compare a node and a key.
+ */
+static int
+pmap_compare_key(void *context, const void *n, const void *k)
+{
+       const struct pv_entry *pve = n;
+       const vaddr_t key = (vaddr_t)k;
+
+       if (pve->pve_pte.pte_va < key) {
+               return -1;
+       }
+       if (pve->pve_pte.pte_va > key) {
+               return 1;
+       }
+       return 0;
+}
+
+/*
  * pmap_map_ptes: map a pmap's PTEs into KVM and lock them in
  *
  * there are several pmaps involved.  some or all of them might be same.
@@ -1696,9 +1748,14 @@
        pool_init(&pmap_pdp_pool, PAGE_SIZE, 0, 0, flags,
            "pdppl", NULL, IPL_NONE);
 #endif
-       pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
-           PR_LARGECACHE, "pvpl", &pool_allocator_kmem, IPL_NONE, NULL,
-           NULL, NULL);
+       pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry),
+#ifdef _LP64
+           coherency_unit,
+#else
+           coherency_unit / 2,
+#endif
+            0, PR_LARGECACHE, "pvpl", &pool_allocator_kmem,
+           IPL_NONE, NULL, NULL, NULL);
 
        pmap_tlb_init();
 
@@ -1710,7 +1767,14 @@
        evcnt_attach_dynamic(&pmap_ldt_evcnt, EVCNT_TYPE_MISC,
            NULL, "x86", "ldt sync");
 
-       radix_tree_init_tree(&pmap_kernel()->pm_pvtree);
+       /*
+        * The kernel doesn't keep track of PTPs, so there's nowhere handy
+        * to hang a tree of pv_entry records.  Dynamically allocated
+        * pv_entry lists are not heavily used in the kernel's pmap (the
+        * usual case is PP_EMBEDDED), so cop out and use a single RB tree
+        * to cover them.
+        */
+       rb_tree_init(&pmap_kernel_rb, &pmap_rbtree_ops);
 
        /*
         * done: pmap module is up (and ready for business)
@@ -1795,8 +1859,7 @@
 
 
 /*
- * pmap_pp_needs_pve: return true if we need to allocate a pv entry and
- * corresponding radix tree entry for the page.
+ * pmap_pp_needs_pve: return true if we need to allocate a pv entry.
  */
 static bool
 pmap_pp_needs_pve(struct pmap_page *pp, struct vm_page *ptp, vaddr_t va)
@@ -1810,7 +1873,7 @@
         * still be pv entries on the list.
         */
 
-       if (pp == NULL || (pp->pp_flags & PP_EMBEDDED) == 0) {
+       if (pp == NULL || (pp->pp_pflags & PP_EMBEDDED) == 0) {
                return false;
        }
        return pp->pp_pte.pte_ptp != ptp || pp->pp_pte.pte_va != va;
@@ -1831,60 +1894,59 @@
        KASSERT(mutex_owned(&pmap->pm_lock));
 
        for ( /* null */ ; pve != NULL ; pve = next) {
-               pmap_pvmap_remove(pmap, pve->pve_pte.pte_va, pve);
                next = pve->pve_next;


