Source-Changes-HG archive


[src/trunk]: src/sys/arch/hppa To allow concurrent page faults on the same set of pages, lock the PV lists



details:   https://anonhg.NetBSD.org/src/rev/b1e5f032279c
branches:  trunk
changeset: 971169:b1e5f032279c
user:      skrll <skrll@NetBSD.org>
date:      Thu Apr 16 09:51:56 2020 +0000

description:
To allow concurrent page faults on the same set of pages, lock the PV
lists.  From ad@ and fixed up by me.

Remove __HAVE_UNLOCKED_PMAP
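
The core of the change, visible in the pmap.c diff below, is a small fixed
array of hashed, cache-line-padded mutexes protecting the per-page PV lists,
so faults on different pages rarely contend for the same lock.  Here is a
minimal userland sketch of the scheme (editor's illustration: pthreads stand
in for kmutex_t, and CACHE_LINE, NLOCKS and pv_lock_for() are made-up names
mirroring COHERENCY_UNIT, pmap_pv_locks[] and PMAP_PV_LOCK()):

#include <pthread.h>
#include <stdint.h>

#define CACHE_LINE 64                   /* stand-in for COHERENCY_UNIT */
#define NLOCKS     64                   /* power of two, as in the diff */

static union pv_lock {
        pthread_mutex_t lock;
        char            padding[CACHE_LINE];    /* one lock per cache line */
} pv_locks[NLOCKS] __attribute__((aligned(CACHE_LINE)));

/*
 * Hash a page's metadata address to one of the locks.  Shifting right
 * by 7 drops the low bits, which are largely shared by addresses within
 * the same 128-byte region, presumably because the containing structures
 * are spaced on that order; neighbouring pages thus tend to land on
 * different locks.
 */
static pthread_mutex_t *
pv_lock_for(const void *md)
{
        return &pv_locks[((uintptr_t)md >> 7) & (NLOCKS - 1)].lock;
}

/* Mirrors the mutex_init() loop the diff adds to pmap_init(). */
static void
pv_locks_init(void)
{
        for (unsigned i = 0; i < NLOCKS; i++)
                pthread_mutex_init(&pv_locks[i].lock, NULL);
}

Two pages may hash to the same slot and share a lock; that costs a little
contention but never correctness, and it keeps the lock array small enough
to stay cache-resident.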

diffstat:

 sys/arch/hppa/hppa/pmap.c     |  223 ++++++++++++++++++++++++++++++++---------
 sys/arch/hppa/include/types.h |    5 +-
 2 files changed, 172 insertions(+), 56 deletions(-)

diffs (truncated from 563 to 300 lines):

diff -r 77f563f272c2 -r b1e5f032279c sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Thu Apr 16 09:51:40 2020 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Thu Apr 16 09:51:56 2020 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: pmap.c,v 1.110 2020/04/16 05:22:59 skrll Exp $ */
+/*     $NetBSD: pmap.c,v 1.111 2020/04/16 09:51:56 skrll Exp $ */
 
 /*-
- * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
+ * Copyright (c) 2001, 2002, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -65,12 +65,14 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.110 2020/04/16 05:22:59 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.111 2020/04/16 09:51:56 skrll Exp $");
 
 #include "opt_cputype.h"
 
 #include <sys/param.h>
+#include <sys/atomic.h>
 #include <sys/mutex.h>
+#include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/systm.h>
@@ -108,6 +110,14 @@
 
 static kmutex_t        pmaps_lock;
 
+static union pmap_pv_locks {
+       kmutex_t        lock;
+       char            padding[COHERENCY_UNIT];
+} pmap_pv_locks[64] __aligned(COHERENCY_UNIT);
+
+#define        PMAP_PV_LOCK(md) \
+    ((uintptr_t)(md) >> 7 & (__arraycount(pmap_pv_locks) - 1))
+
 u_int  hppa_prot[8];
 u_int  sid_counter;
 
@@ -140,6 +150,10 @@
 struct vm_page *pmap_pagealloc(struct uvm_object *, voff_t);
 void pmap_pagefree(struct vm_page *);
 
+static inline void pmap_lock(struct pmap *);
+static inline void pmap_unlock(struct pmap *);
+static inline bool pmap_trylock(struct pmap *);
+
 static inline void pmap_sdir_set(pa_space_t, volatile uint32_t *);
 static inline uint32_t *pmap_sdir_get(pa_space_t);
 
@@ -165,6 +179,9 @@
     vaddr_t , struct vm_page *, u_int);
 static inline struct pv_entry *pmap_pv_remove(struct vm_page *, pmap_t,
     vaddr_t);
+static inline void pmap_pv_lock(const struct vm_page_md *md);
+static inline void pmap_pv_unlock(const struct vm_page_md *md);
+static inline bool pmap_pv_locked(const struct vm_page_md *md);
 
 static inline void pmap_flush_page(struct vm_page *, bool);
 static int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t);
@@ -195,17 +212,47 @@
 #define pmap_pvh_attrs(a) \
        (((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
 
-#define PMAP_LOCK(pm)                                          \
-       do {                                                    \
-               if ((pm) != pmap_kernel())                      \
-                       rw_enter((pm)->pm_lock, RW_WRITER);     \
-       } while (/*CONSTCOND*/0)
+static inline void
+pmap_lock(struct pmap *pm)
+{
+
+       rw_enter(pm->pm_lock, RW_WRITER);
+}
+
+static inline void
+pmap_unlock(struct pmap *pm)
+{
+
+       rw_exit(pm->pm_lock);
+}
+
+static inline bool
+pmap_trylock(struct pmap *pm)
+{
+
+       return rw_tryenter(pm->pm_lock, RW_WRITER);
+}
 
-#define PMAP_UNLOCK(pm)                                                \
-       do {                                                    \
-               if ((pm) != pmap_kernel())                      \
-                       rw_exit((pm)->pm_lock);                 \
-       } while (/*CONSTCOND*/0)
+static inline void
+pmap_pv_lock(const struct vm_page_md *md)
+{
+
+       mutex_enter(&pmap_pv_locks[PMAP_PV_LOCK(md)].lock);
+}
+
+static inline void
+pmap_pv_unlock(const struct vm_page_md *md)
+{
+
+       mutex_exit(&pmap_pv_locks[PMAP_PV_LOCK(md)].lock);
+}
+
+static inline bool
+pmap_pv_locked(const struct vm_page_md *md)
+{
+
+       return mutex_owned(&pmap_pv_locks[PMAP_PV_LOCK(md)].lock);
+}
 
 struct vm_page *
 pmap_pagealloc(struct uvm_object *obj, voff_t off)
@@ -304,6 +351,7 @@
        UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pdep %#jx", (uintptr_t)pm,
            va, (uintptr_t)pdep, 0);
 
+
        KASSERT(pm != pmap_kernel());
        KASSERT(rw_write_held(pm->pm_lock));
 
@@ -559,7 +607,7 @@
        UVMHIST_LOG(maphist, "...pdep %#jx flags %#jx",
            (uintptr_t)pdep, flags, 0, 0);
 
-       KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg, true));
+       KASSERT(pmap_pv_locked(md));
 
        pve->pv_pmap = pm;
        pve->pv_va = va | flags;
@@ -578,7 +626,7 @@
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        struct pv_entry **pve, *pv;
 
-       KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg, true));
+       KASSERT(pmap_pv_locked(md));
 
        for (pv = *(pve = &md->pvh_list);
            pv; pv = *(pve = &(*pve)->pv_next)) {
@@ -967,6 +1015,7 @@
 {
        extern void gateway_page(void);
        volatile pt_entry_t *pde;
+       int i;
 
        UVMHIST_FUNC(__func__)
        UVMHIST_CALLED(maphist);
@@ -995,6 +1044,9 @@
        pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
            PTE_PROT(TLB_GATE_PROT));
 
+       for (i = 0; i < __arraycount(pmap_pv_locks); i++)
+               mutex_init(&pmap_pv_locks[i].lock, MUTEX_DEFAULT, IPL_VM);
+
        pmap_initialized = true;
 
        UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
@@ -1082,13 +1134,8 @@
        struct vm_page *pg;
        off_t off;
 #endif
-       int refs;
 
-       rw_enter(pmap->pm_lock, RW_WRITER);
-       refs = --pmap->pm_obj.uo_refs;
-       rw_exit(pmap->pm_lock);
-
-       if (refs > 0)
+       if (atomic_dec_uint_nv(&pmap->pm_obj.uo_refs) > 0)
                return;
 
 #ifdef DIAGNOSTIC
@@ -1101,6 +1148,7 @@
                struct vm_page *spg;
                struct pv_entry *pv, *npv;
                paddr_t pa;
+               vaddr_t va;
 
                off = pg->offset + PAGE_SIZE;
                uvm_page_array_advance(&a);
@@ -1121,6 +1169,7 @@
                                continue;
 
                        struct vm_page_md * const md = VM_PAGE_TO_MD(spg);
+                       pmap_pv_lock(md);
                        for (pv = md->pvh_list; pv != NULL; pv = npv) {
                                npv = pv->pv_next;
                                if (pv->pv_pmap != pmap)
@@ -1129,9 +1178,14 @@
                                UVMHIST_LOG(maphist, " %#jx", pv->pv_va, 0, 0,
                                    0);
 
-                               pmap_remove(pmap, pv->pv_va & PV_VAMASK,
-                                   (pv->pv_va & PV_VAMASK) + PAGE_SIZE);
+                               va = pv->pv_va & PV_VAMASK;
+                               pmap_pv_unlock(md);
+                               pmap_remove(pmap, va, va + PAGE_SIZE);
+                               pmap_pv_lock(md);
+                               /* List may have changed: restart. */
+                               npv = md->pvh_list;
                        }
+                       pmap_pv_unlock(md);
                }
        }
        rw_exit(pmap->pm_lock);
@@ -1156,9 +1210,7 @@
        UVMHIST_FUNC(__func__)
        UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);
 
-       rw_enter(pmap->pm_lock, RW_WRITER);
-       pmap->pm_obj.uo_refs++;
-       rw_exit(pmap->pm_lock);
+       atomic_inc_uint(&pmap->pm_obj.uo_refs);
 }
 
 
@@ -1207,12 +1259,12 @@
            (uintptr_t)pmap, va, pa, prot);
        UVMHIST_LOG(maphist, "...flags %#jx", flags, 0, 0, 0);
 
-       PMAP_LOCK(pmap);
+       pmap_lock(pmap);
 
        if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
            !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
                if (flags & PMAP_CANFAIL) {
-                       PMAP_UNLOCK(pmap);
+                       pmap_unlock(pmap);
                        return (ENOMEM);
                }
 
@@ -1238,10 +1290,12 @@
                }
 
                if (pg != NULL) {
-                       pve = pmap_pv_remove(pg, pmap, va);
+                       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-                       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+                       pmap_pv_lock(md);
+                       pve = pmap_pv_remove(pg, pmap, va);
                        md->pvh_attrs |= pmap_pvh_attrs(pte);
+                       pmap_pv_unlock(md);
                }
        } else {
                UVMHIST_LOG(maphist, "new mapping %#jx -> %#jx",
@@ -1255,9 +1309,11 @@
        }
 
        if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
+               struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
                if (!pve && !(pve = pmap_pv_alloc())) {
                        if (flags & PMAP_CANFAIL) {
-                               PMAP_UNLOCK(pmap);
+                               pmap_unlock(pmap);
                                return (ENOMEM);
                        }
                        panic("%s: no pv entries available", __func__);
@@ -1265,7 +1321,9 @@
                pte |= PTE_PROT(pmap_prot(pmap, prot));
                if (pmap_check_alias(pg, va, pte))
                        pmap_page_remove(pg);
+               pmap_pv_lock(md);
                pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
+               pmap_pv_unlock(md);
        } else if (pve) {
                pmap_pv_free(pve);
        }
@@ -1291,7 +1349,7 @@
                pte |= PTE_PROT(TLB_WIRED);
        pmap_pte_set(pde, va, pte);
 
-       PMAP_UNLOCK(pmap);
+       pmap_unlock(pmap);
 
        UVMHIST_LOG(maphist, "<--- done (0)", 0, 0, 0, 0);
 
@@ -1308,7 +1366,6 @@
 void
 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
-
        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(maphist, "sva %#jx eva %#jx", sva, eva, 0, 0);
 
@@ -1319,7 +1376,7 @@
        vaddr_t pdemask;
        int batch;
 

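A subtlety worth calling out in the pmap_destroy() hunk above: pmap_remove()
cannot be called with a PV lock held, so the loop drops the lock around the
call, retakes it, and restarts the walk from the head of the list, which may
have changed in the meantime.  A minimal sketch of that drop-and-restart
pattern (editor's illustration with pthreads; struct pv, purge_owner() and
remove_mapping() are hypothetical names):

#include <pthread.h>
#include <stdint.h>

struct pv {                             /* stand-in for struct pv_entry */
        struct pv *next;
        void      *owner;
        uintptr_t  va;                  /* like pv_va */
};

/* Hypothetical stand-in for pmap_remove(); must run unlocked. */
static void
remove_mapping(void *owner, uintptr_t va)
{
        (void)owner;
        (void)va;
}

/*
 * Remove every entry on a PV list belonging to 'owner'.  The lock is
 * released around remove_mapping(); since other threads may reshape the
 * list while it is unlocked, the saved 'next' pointer cannot be trusted
 * and the walk restarts from the head, exactly as the diff does with
 * "npv = md->pvh_list".
 */
static void
purge_owner(pthread_mutex_t *lk, struct pv **head, void *owner)
{
        struct pv *pv, *npv;

        pthread_mutex_lock(lk);
        for (pv = *head; pv != NULL; pv = npv) {
                npv = pv->next;
                if (pv->owner != owner)
                        continue;
                uintptr_t va = pv->va;  /* copy out before unlocking */
                pthread_mutex_unlock(lk);
                remove_mapping(owner, va);
                pthread_mutex_lock(lk);
                npv = *head;            /* list may have changed: restart */
        }
        pthread_mutex_unlock(lk);
}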


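The diff also stops taking pm_lock just to adjust the pmap's reference
count: pmap_reference() and pmap_destroy() now use atomic_inc_uint() and
atomic_dec_uint_nv() instead.  The same pattern with C11 atomics
(stdatomic.h standing in for sys/atomic.h; struct and function names are
made up):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
        atomic_uint refs;               /* stands in for pm_obj.uo_refs */
};

/* Like pmap_reference(): no lock needed to bump the count. */
static void
obj_reference(struct obj *o)
{
        atomic_fetch_add(&o->refs, 1);
}

/*
 * Like the top of pmap_destroy(): atomic_fetch_sub() returns the old
 * value, so it equals 1 exactly when this caller dropped the count to
 * zero -- the inverse of the diff's "atomic_dec_uint_nv(...) > 0" test.
 */
static bool
obj_release(struct obj *o)
{
        return atomic_fetch_sub(&o->refs, 1) == 1;
}

Only the thread that sees the count hit zero proceeds to tear the pmap
down, so the rwlock no longer needs to serialise plain reference counting.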