Source-Changes-HG archive


[src/trunk]: src/sys/arch Merge x86 pmap changes from yamt-pagecache:



details:   https://anonhg.NetBSD.org/src/rev/8ecb17993ebd
branches:  trunk
changeset: 466001:8ecb17993ebd
user:      ad <ad@NetBSD.org>
date:      Sun Dec 08 20:42:48 2019 +0000

description:
Merge x86 pmap changes from yamt-pagecache:

- Deal better with the multi-level pmap object locking kludge.
- Handle uvm_pagealloc() being able to block.
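
Of the two items, the second is the subtler one. The pmap_get_ptp() hunk
below samples the LWP's context-switch counter around the allocation:
with kernel preemption disabled, l_ncsw can only advance if
uvm_pagealloc() slept, in which case locks may have been dropped and the
operation must be unwound and retried. A minimal sketch of that idiom
follows; alloc_ptp_page() is an invented name for illustration, not part
of the commit:

	#include <sys/param.h>
	#include <sys/errno.h>
	#include <sys/lwp.h>

	#include <uvm/uvm.h>

	/*
	 * Invented helper showing the blocking-detection idiom from the
	 * diff: compare l_ncsw before and after the allocation.  A change
	 * means the thread slept, so the caller has to revalidate state
	 * and retry.
	 */
	static int
	alloc_ptp_page(struct uvm_object *obj, voff_t off, int aflags,
	    struct vm_page **pgp)
	{
		lwp_t *l = curlwp;
		uint64_t ncsw = l->l_ncsw;
		struct vm_page *pg;

		pg = uvm_pagealloc(obj, off, NULL, aflags);
		*pgp = pg;
		if (__predict_false(ncsw != l->l_ncsw)) {
			/* Slept: hand any page back for the unwind path. */
			return EAGAIN;
		}
		return (pg == NULL) ? ENOMEM : 0;
	}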

diffstat:

 sys/arch/x86/include/pmap.h |    5 +-
 sys/arch/x86/x86/pmap.c     |  174 ++++++++++++++++++++-----------------------
 sys/arch/x86/x86/svs.c      |    6 +-
 sys/arch/xen/x86/xen_pmap.c |    6 +-
 4 files changed, 91 insertions(+), 100 deletions(-)

diffs (truncated from 611 to 300 lines):

diff -r e2cd1720b361 -r 8ecb17993ebd sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h       Sun Dec 08 20:35:23 2019 +0000
+++ b/sys/arch/x86/include/pmap.h       Sun Dec 08 20:42:48 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.105 2019/11/14 16:23:52 maxv Exp $  */
+/*     $NetBSD: pmap.h,v 1.106 2019/12/08 20:42:48 ad Exp $    */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -249,8 +249,7 @@
 
 struct pmap {
        struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1) */
-#define        pm_lock pm_obj[0].vmobjlock
-       kmutex_t pm_obj_lock[PTP_LEVELS-1];     /* locks for pm_objs */
+       kmutex_t pm_lock;               /* locks for pm_objs */
        LIST_ENTRY(pmap) pm_list;       /* list (lck by pm_list lock) */
        pd_entry_t *pm_pdir;            /* VA of PD (lck by object lock) */
        paddr_t pm_pdirpa[PDP_SIZE];    /* PA of PDs (read-only after create) */
diff -r e2cd1720b361 -r 8ecb17993ebd sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Sun Dec 08 20:35:23 2019 +0000
+++ b/sys/arch/x86/x86/pmap.c   Sun Dec 08 20:42:48 2019 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: pmap.c,v 1.342 2019/12/03 15:20:59 riastradh Exp $     */
+/*     $NetBSD: pmap.c,v 1.343 2019/12/08 20:42:48 ad Exp $    */
 
 /*
- * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2010, 2016, 2017, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.342 2019/12/03 15:20:59 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.343 2019/12/08 20:42:48 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -261,24 +261,6 @@
 static vaddr_t pmap_maxkvaddr;
 
 /*
- * XXX kludge: dummy locking to make KASSERTs in uvm_page.c comfortable.
- * actual locking is done by pm_lock.
- */
-#if defined(DIAGNOSTIC)
-#define        PMAP_SUBOBJ_LOCK(pm, idx) \
-       KASSERT(mutex_owned((pm)->pm_lock)); \
-       if ((idx) != 0) \
-               mutex_enter((pm)->pm_obj[(idx)].vmobjlock)
-#define        PMAP_SUBOBJ_UNLOCK(pm, idx) \
-       KASSERT(mutex_owned((pm)->pm_lock)); \
-       if ((idx) != 0) \
-               mutex_exit((pm)->pm_obj[(idx)].vmobjlock)
-#else /* defined(DIAGNOSTIC) */
-#define        PMAP_SUBOBJ_LOCK(pm, idx)       /* nothing */
-#define        PMAP_SUBOBJ_UNLOCK(pm, idx)     /* nothing */
-#endif /* defined(DIAGNOSTIC) */
-
-/*
  * Misc. event counters.
  */
 struct evcnt pmap_iobmp_evcnt;
@@ -475,8 +457,8 @@
 static void pmap_remap_largepages(void);
 #endif
 
-static struct vm_page *pmap_get_ptp(struct pmap *, vaddr_t,
-    pd_entry_t * const *, int);
+static int pmap_get_ptp(struct pmap *, vaddr_t,
+    pd_entry_t * const *, int, struct vm_page **);
 static struct vm_page *pmap_find_ptp(struct pmap *, vaddr_t, paddr_t, int);
 static void pmap_freepage(struct pmap *, struct vm_page *, int);
 static void pmap_free_ptp(struct pmap *, struct vm_page *, vaddr_t,
@@ -502,7 +484,7 @@
                atomic_add_long(&pmap->pm_stats.resident_count, resid_diff);
                atomic_add_long(&pmap->pm_stats.wired_count, wired_diff);
        } else {
-               KASSERT(mutex_owned(pmap->pm_lock));
+               KASSERT(mutex_owned(&pmap->pm_lock));
                pmap->pm_stats.resident_count += resid_diff;
                pmap->pm_stats.wired_count += wired_diff;
        }
@@ -640,13 +622,13 @@
 
        l = curlwp;
  retry:
-       mutex_enter(pmap->pm_lock);
+       mutex_enter(&pmap->pm_lock);
        ci = curcpu();
        curpmap = ci->ci_pmap;
        if (vm_map_pmap(&l->l_proc->p_vmspace->vm_map) == pmap) {
                /* Our own pmap so just load it: easy. */
                if (__predict_false(ci->ci_want_pmapload)) {
-                       mutex_exit(pmap->pm_lock);
+                       mutex_exit(&pmap->pm_lock);
                        pmap_load();
                        goto retry;
                }
@@ -670,6 +652,7 @@
                kcpuset_atomic_clear(curpmap->pm_kernel_cpus, cid);
                ci->ci_pmap = pmap;
                ci->ci_tlbstate = TLBSTATE_VALID;
+               ci->ci_want_pmapload = 0;
                kcpuset_atomic_set(pmap->pm_cpus, cid);
                kcpuset_atomic_set(pmap->pm_kernel_cpus, cid);
                cpu_load_pmap(pmap, curpmap);
@@ -717,7 +700,7 @@
        KASSERT(pmap->pm_ncsw == curlwp->l_ncsw);
        mypmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
        if (pmap == mypmap) {
-               mutex_exit(pmap->pm_lock);
+               mutex_exit(&pmap->pm_lock);
                return;
        }
 
@@ -725,9 +708,15 @@
         * Mark whatever's on the CPU now as lazy and unlock.
         * If the pmap was already installed, we are done.
         */
-       ci->ci_tlbstate = TLBSTATE_LAZY;
-       ci->ci_want_pmapload = (mypmap != pmap_kernel());
-       mutex_exit(pmap->pm_lock);
+       if (ci->ci_tlbstate == TLBSTATE_VALID) {
+               ci->ci_tlbstate = TLBSTATE_LAZY;
+               ci->ci_want_pmapload = (mypmap != pmap_kernel());
+       } else {
+               /*
+                * This can happen when undoing after pmap_get_ptp blocked.
+                */ 
+       }
+       mutex_exit(&pmap->pm_lock);
        if (pmap == pmap2) {
                return;
        }
@@ -1089,10 +1078,10 @@
         * tables (fast user-level vtophys?). This may or may not be useful.
         */
        kpm = pmap_kernel();
+       mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_NONE);
        for (i = 0; i < PTP_LEVELS - 1; i++) {
-               mutex_init(&kpm->pm_obj_lock[i], MUTEX_DEFAULT, IPL_NONE);
                uvm_obj_init(&kpm->pm_obj[i], NULL, false, 1);
-               uvm_obj_setlock(&kpm->pm_obj[i], &kpm->pm_obj_lock[i]);
+               uvm_obj_setlock(&kpm->pm_obj[i], &kpm->pm_lock);
                kpm->pm_ptphint[i] = NULL;
        }
        memset(&kpm->pm_list, 0, sizeof(kpm->pm_list));  /* pm_list not used */
@@ -1992,15 +1981,13 @@
        int lidx = level - 1;
        struct vm_page *pg;
 
-       KASSERT(mutex_owned(pmap->pm_lock));
+       KASSERT(mutex_owned(&pmap->pm_lock));
 
        if (pa != (paddr_t)-1 && pmap->pm_ptphint[lidx] &&
            pa == VM_PAGE_TO_PHYS(pmap->pm_ptphint[lidx])) {
                return (pmap->pm_ptphint[lidx]);
        }
-       PMAP_SUBOBJ_LOCK(pmap, lidx);
        pg = uvm_pagelookup(&pmap->pm_obj[lidx], ptp_va2o(va, level));
-       PMAP_SUBOBJ_UNLOCK(pmap, lidx);
 
        KASSERT(pg == NULL || pg->wire_count >= 1);
        return pg;
@@ -2019,8 +2006,6 @@
 
        obj = &pmap->pm_obj[lidx];
        pmap_stats_update(pmap, -1, 0);
-       if (lidx != 0)
-               mutex_enter(obj->vmobjlock);
        if (pmap->pm_ptphint[lidx] == ptp)
                pmap->pm_ptphint[lidx] = TAILQ_FIRST(&obj->memq);
        ptp->wire_count = 0;
@@ -2029,8 +2014,6 @@
        KASSERT((l->l_pflag & LP_INTR) == 0);
        VM_PAGE_TO_PP(ptp)->pp_link = l->l_md.md_gc_ptp;
        l->l_md.md_gc_ptp = ptp;
-       if (lidx != 0)
-               mutex_exit(obj->vmobjlock);
 }
 
 static void
@@ -2043,7 +2026,7 @@
        pd_entry_t opde;
 
        KASSERT(pmap != pmap_kernel());
-       KASSERT(mutex_owned(pmap->pm_lock));
+       KASSERT(mutex_owned(&pmap->pm_lock));
        KASSERT(kpreempt_disabled());
 
        level = 1;
@@ -2092,23 +2075,26 @@
  * => pmap should be locked
  * => preemption should be disabled
  */
-static struct vm_page *
-pmap_get_ptp(struct pmap *pmap, vaddr_t va, pd_entry_t * const *pdes, int flags)
+static int
+pmap_get_ptp(struct pmap *pmap, vaddr_t va, pd_entry_t * const *pdes, int flags,
+            struct vm_page **resultp)
 {
        struct vm_page *ptp;
        struct {
                struct vm_page *pg;
                bool new;
        } pt[PTP_LEVELS + 1];
-       int i, aflags;
+       int i, aflags, error;
        unsigned long index;
        pd_entry_t *pva;
        paddr_t pa;
        struct uvm_object *obj;
        voff_t off;
+       lwp_t *l;
+       uint64_t ncsw;
 
        KASSERT(pmap != pmap_kernel());
-       KASSERT(mutex_owned(pmap->pm_lock));
+       KASSERT(mutex_owned(&pmap->pm_lock));
        KASSERT(kpreempt_disabled());
 
        /*
@@ -2122,16 +2108,24 @@
                obj = &pmap->pm_obj[i - 2];
                off = ptp_va2o(va, i - 1);
 
-               PMAP_SUBOBJ_LOCK(pmap, i - 2);
                pt[i].pg = uvm_pagelookup(obj, off);
                if (pt[i].pg == NULL) {
+                       l = curlwp;
+                       ncsw = l->l_ncsw;
                        pt[i].pg = uvm_pagealloc(obj, off, NULL, aflags);
                        pt[i].new = true;
+                       if (__predict_false(ncsw != l->l_ncsw)) {
+                               /* uvm_pagealloc can block. */
+                               /* XXX silence assertion in pmap_unmap_ptes */
+                               pmap->pm_ncsw = l->l_ncsw;
+                               error = EAGAIN;
+                               goto fail;
+                       }
                }
-               PMAP_SUBOBJ_UNLOCK(pmap, i - 2);
-
-               if (pt[i].pg == NULL)
+               if (pt[i].pg == NULL) {
+                       error = ENOMEM;
                        goto fail;
+               }
        }
 
        /*
@@ -2183,7 +2177,8 @@
        ptp = pt[2].pg;
        KASSERT(ptp != NULL);
        pmap->pm_ptphint[0] = ptp;
-       return ptp;
+       *resultp = ptp;
+       return 0;
 
        /*
         * Allocation of a PTP failed, free any others that we just allocated.
@@ -2197,11 +2192,9 @@
                        continue;
                }
                obj = &pmap->pm_obj[i - 2];
-               PMAP_SUBOBJ_LOCK(pmap, i - 2);
                uvm_pagefree(pt[i].pg);
-               PMAP_SUBOBJ_UNLOCK(pmap, i - 2);
-       }
-       return NULL;
+       }
+       return error;
 }
 
 /*
@@ -2384,10 +2377,10 @@
        pmap = pool_cache_get(&pmap_cache, PR_WAITOK);
 
        /* init uvm_object */
+       mutex_init(&pmap->pm_lock, MUTEX_DEFAULT, IPL_NONE);
        for (i = 0; i < PTP_LEVELS - 1; i++) {
-               mutex_init(&pmap->pm_obj_lock[i], MUTEX_DEFAULT, IPL_NONE);
                uvm_obj_init(&pmap->pm_obj[i], NULL, false, 1);
-               uvm_obj_setlock(&pmap->pm_obj[i], &pmap->pm_obj_lock[i]);
+               uvm_obj_setlock(&pmap->pm_obj[i], &pmap->pm_lock);
                pmap->pm_ptphint[i] = NULL;
        }
        pmap->pm_stats.wired_count = 0;
@@ -2585,8 +2578,8 @@
 
        for (i = 0; i < PTP_LEVELS - 1; i++) {
                uvm_obj_destroy(&pmap->pm_obj[i], false);
-               mutex_destroy(&pmap->pm_obj_lock[i]);
-       }
+       }
+       mutex_destroy(&pmap->pm_lock);
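
The diff stops at the 300-line mark, so the reworked callers of
pmap_get_ptp() are not shown. Purely as a hypothetical sketch of how the
new EAGAIN return might be consumed inside pmap.c (this loop is not
taken from the commit), a caller could re-run the map/unmap dance around
the PTP lookup:

	/*
	 * Hypothetical caller loop, assuming the static pmap.c helpers
	 * pmap_map_ptes()/pmap_unmap_ptes() referenced elsewhere in the
	 * diff.  EAGAIN now means "uvm_pagealloc() slept and locks were
	 * dropped"; the pm_ncsw update in pmap_get_ptp() is what keeps
	 * the l_ncsw KASSERT on the unmap path from firing here.
	 */
	struct pmap *pmap2;
	pd_entry_t *ptes;
	pd_entry_t * const *pdes;
	struct vm_page *ptp;
	int error;

	for (;;) {
		pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
		error = pmap_get_ptp(pmap, va, pdes, flags, &ptp);
		if (error != EAGAIN)
			break;
		pmap_unmap_ptes(pmap, pmap2);	/* restart from scratch */
	}
	/* On success the PTEs stay mapped for the PTE update to follow. */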