Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/vax PR port-vax/55094: vax pmap needs locking adjus...
details: https://anonhg.NetBSD.org/src/rev/effbfc50e9de
branches: trunk
changeset: 746084:effbfc50e9de
user: ad <ad%NetBSD.org@localhost>
date: Sat Mar 21 18:47:54 2020 +0000
description:
PR port-vax/55094: vax pmap needs locking adjustments
Make the adjustments noted in the PR and don't call uvm_wait() or do
WAITOK ever - UVM takes care of that.
diffstat:
sys/arch/vax/include/pmap.h | 37 +--------
sys/arch/vax/vax/pmap.c | 177 +++++++++++++++++--------------------------
2 files changed, 73 insertions(+), 141 deletions(-)
diffs (truncated from 556 to 300 lines):
diff -r 81069e4de73d -r effbfc50e9de sys/arch/vax/include/pmap.h
--- a/sys/arch/vax/include/pmap.h Sat Mar 21 18:43:47 2020 +0000
+++ b/sys/arch/vax/include/pmap.h Sat Mar 21 18:47:54 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.81 2020/03/14 14:05:44 ad Exp $ */
+/* $NetBSD: pmap.h,v 1.82 2020/03/21 18:47:54 ad Exp $ */
/*
* Copyright (c) 1991 Regents of the University of California.
@@ -189,9 +189,6 @@
return (false);
}
-bool pmap_clear_modify_long(const struct pv_entry *);
-bool pmap_clear_reference_long(const struct pv_entry *);
-bool pmap_is_modified_long_p(const struct pv_entry *);
void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
@@ -209,38 +206,6 @@
return (pv->pv_attr & PG_V) != 0;
}
-static __inline bool
-pmap_clear_reference(struct vm_page *pg)
-{
- struct pv_entry * const pv = pmap_pg_to_pv(pg);
- bool rv = (pv->pv_attr & PG_V) != 0;
-
- pv->pv_attr &= ~PG_V;
- if (pv->pv_pmap != NULL || pv->pv_next != NULL)
- rv |= pmap_clear_reference_long(pv);
- return rv;
-}
-
-static __inline bool
-pmap_clear_modify(struct vm_page *pg)
-{
- struct pv_entry * const pv = pmap_pg_to_pv(pg);
- bool rv = (pv->pv_attr & PG_M) != 0;
-
- pv->pv_attr &= ~PG_M;
- if (pv->pv_pmap != NULL || pv->pv_next != NULL)
- rv |= pmap_clear_modify_long(pv);
- return rv;
-}
-
-static __inline bool
-pmap_is_modified(struct vm_page *pg)
-{
- const struct pv_entry * const pv = pmap_pg_to_pv(pg);
-
- return (pv->pv_attr & PG_M) != 0 || pmap_is_modified_long_p(pv);
-}
-
static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
diff -r 81069e4de73d -r effbfc50e9de sys/arch/vax/vax/pmap.c
--- a/sys/arch/vax/vax/pmap.c Sat Mar 21 18:43:47 2020 +0000
+++ b/sys/arch/vax/vax/pmap.c Sat Mar 21 18:47:54 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.189 2020/02/15 18:12:14 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.190 2020/03/21 18:47:54 ad Exp $ */
/*
* Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Luleå, Sweden.
* All rights reserved.
@@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.189 2020/02/15 18:12:14 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.190 2020/03/21 18:47:54 ad Exp $");
#include "opt_ddb.h"
#include "opt_cputype.h"
@@ -114,29 +114,9 @@
static inline void
pmap_decrement_stats(struct pmap *pm, bool wired)
{
-#if defined(MULTIPROCESSOR)
- atomic_dec_ulong(&pm->pm_stats.resident_count);
- if (wired)
- atomic_dec_ulong(&pm->pm_stats.wired_count);
-#else
pm->pm_stats.resident_count--;
if (wired)
pm->pm_stats.wired_count--;
-#endif
-}
-
-static inline void
-pmap_increment_stats(struct pmap *pm, bool wired)
-{
-#if defined(MULTIPROCESSOR)
- atomic_inc_ulong(&pm->pm_stats.resident_count);
- if (wired)
- atomic_inc_ulong(&pm->pm_stats.wired_count);
-#else
- pm->pm_stats.resident_count++;
- if (wired)
- pm->pm_stats.wired_count++;
-#endif
}
/*
@@ -171,27 +151,18 @@
}
#ifdef PMAPDEBUG
-volatile int recurse;
-#define RECURSESTART { \
- if (recurse) \
- printf("enter at %d, previous %d\n", __LINE__, recurse);\
- recurse = __LINE__; \
-}
-#define RECURSEEND {recurse = 0; }
#define PMDEBUG(x) if (startpmapdebug)printf x
#else
-#define RECURSESTART
-#define RECURSEEND
#define PMDEBUG(x)
#endif
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-static kmutex_t pvtable_lock;
-#define PVTABLE_LOCK mutex_spin_enter(&pvtable_lock);
-#define PVTABLE_UNLOCK mutex_spin_enter(&pvtable_lock);
+static kmutex_t pmap_lock;
+#define PMAP_LOCK mutex_spin_enter(&pmap_lock);
+#define PMAP_UNLOCK mutex_spin_exit(&pmap_lock);
#else
-#define PVTABLE_LOCK
-#define PVTABLE_UNLOCK
+#define PMAP_LOCK
+#define PMAP_UNLOCK
#endif
#ifdef PMAPDEBUG
@@ -204,7 +175,7 @@
struct pv_entry *get_pventry(void);
void free_pventry(struct pv_entry *);
void more_pventries(void);
-vaddr_t get_ptp(bool);
+vaddr_t get_ptp(void);
void free_ptp(paddr_t);
/*
@@ -434,7 +405,7 @@
SIMPLEQ_FIRST(&cpus) = ci;
#endif
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- mutex_init(&pvtable_lock, MUTEX_DEFAULT, IPL_VM);
+ mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_VM);
#endif
/*
@@ -606,18 +577,13 @@
* Allocate a page through direct-mapped segment.
*/
static vaddr_t
-getpage(bool wait)
+getpage(void)
{
struct vm_page *pg;
- for (;;) {
- pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
- if (pg != NULL)
- break;
- if (!wait)
- return 0;
- uvm_wait("getpage");
- }
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+ if (pg == NULL)
+ return 0;
return (VM_PAGE_TO_PHYS(pg)|KERNBASE);
}
@@ -813,9 +779,9 @@
p0lr = pm->pm_p0lr;
inuse = p0lr != 0;
len = round_page((reqlen+1) * PPTESZ);
- RECURSEEND;
+ PMAP_UNLOCK;
nptespc = pmap_getusrptes(pm, len);
- RECURSESTART;
+ PMAP_LOCK;
if (nptespc == 0)
return 0;
@@ -856,9 +822,9 @@
/* Get new pte space */
nlen = (NPTEPERREG*PPTESZ) - trunc_page(len * PPTESZ);
- RECURSEEND;
+ PMAP_UNLOCK;
nptespc = pmap_getusrptes(pm, nlen);
- RECURSESTART;
+ PMAP_LOCK;
if (nptespc == 0)
return 0;
@@ -1086,7 +1052,7 @@
PMDEBUG(("pmap_enter: pmap %p v %lx p %lx prot %x wired %d access %x\n",
pmap, v, p, prot, (flags & PMAP_WIRED) != 0, flags & VM_PROT_ALL));
- RECURSESTART;
+ PMAP_LOCK;
/* Find address of correct pte */
switch (SEGTYPE(v)) {
@@ -1124,10 +1090,12 @@
if (*ptpptr == 0) {
paddr_t phys;
- phys = get_ptp((flags & PMAP_CANFAIL) != 0);
+ phys = get_ptp();
if (phys == 0) {
- RECURSEEND;
- return ENOMEM;
+ PMAP_UNLOCK;
+ if ((flags & PMAP_CANFAIL) != 0)
+ return ENOMEM;
+ panic("pmap_enter: out of memory");
}
*ptpptr = PG_V | PG_KW | PG_PFNUM(phys);
}
@@ -1138,7 +1106,7 @@
*/
if (IOSPACE_P(p)) {
mapin8(pteptr, newpte);
- RECURSEEND;
+ PMAP_UNLOCK;
return 0;
}
@@ -1152,13 +1120,13 @@
if (newpte == (oldpte | PG_W)) {
*pteptr |= PG_W;
pmap->pm_stats.wired_count++;
- RECURSEEND;
+ PMAP_UNLOCK;
return 0;
}
/* mapping unchanged? just return. */
if (newpte == oldpte) {
- RECURSEEND;
+ PMAP_UNLOCK;
return 0;
}
@@ -1174,15 +1142,14 @@
*/
if (oldpte & PG_FRAME) {
- RECURSEEND;
if ((oldpte & PG_SREF) == 0)
rmpage(pmap, pteptr);
- else
+ else {
+ PMAP_UNLOCK;
panic("pmap_enter on PG_SREF page");
- RECURSESTART;
+ }
}
- PVTABLE_LOCK;
if (pv->pv_pmap == NULL) {
pv->pv_vaddr = v;
pv->pv_pmap = pmap;
@@ -1193,28 +1160,28 @@
tmp->pv_next = pv->pv_next;
pv->pv_next = tmp;
}
- PVTABLE_UNLOCK;
}
- pmap_increment_stats(pmap, (flags & PMAP_WIRED) != 0);
+ pmap->pm_stats.resident_count++;
+ if ((flags & PMAP_WIRED) != 0)
+ pmap->pm_stats.wired_count++;
- PVTABLE_LOCK;
if (flags & (VM_PROT_READ|VM_PROT_WRITE)) {
pv->pv_attr |= PG_V;
newpte |= PG_V;
}
if (flags & VM_PROT_WRITE)
pv->pv_attr |= PG_M;
- PVTABLE_UNLOCK;
if (flags & PMAP_WIRED)
newpte |= PG_V; /* Not allowed to be invalid */
mapin8(pteptr, newpte);
- RECURSEEND;
if (pventries < 10)
Home |
Main Index |
Thread Index |
Old Index