Source-Changes-HG archive
[src/trunk]: src/sys/arch/x68k/x68k Sync with mvme68k.
details: https://anonhg.NetBSD.org/src/rev/a851d9244e08
branches: trunk
changeset: 518819:a851d9244e08
user: minoura <minoura%NetBSD.org@localhost>
date: Thu Dec 06 04:13:39 2001 +0000
description:
Sync with mvme68k.
diffstat:
sys/arch/x68k/x68k/pmap.c | 361 +++++++++++++--------------------------------
1 files changed, 105 insertions(+), 256 deletions(-)
diffs (truncated from 801 to 300 lines):
diff -r 2740da8a6b97 -r a851d9244e08 sys/arch/x68k/x68k/pmap.c
--- a/sys/arch/x68k/x68k/pmap.c Thu Dec 06 04:13:12 2001 +0000
+++ b/sys/arch/x68k/x68k/pmap.c Thu Dec 06 04:13:39 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.71 2001/11/25 15:42:23 minoura Exp $ */
+/* $NetBSD: pmap.c,v 1.72 2001/12/06 04:13:39 minoura Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -198,8 +198,7 @@
#define pmap_ste(m, v) \
(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
#define pmap_ste_v(m, v) \
- ((*pmap_ste1(m, v) & SG_V) && \
- (*pmap_ste2(m, v) & SG_V))
+ ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
#endif
#else
#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
@@ -224,7 +223,7 @@
/*
* Given a map and a machine independent protection code,
- * convert to an x68k protection code.
+ * convert to an m68k protection code.
*/
#define pte_prot(m, p) (protection_codes[p])
int protection_codes[8];
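The macro above resolves a machine-independent protection value (a 3-bit VM_PROT_READ/WRITE/EXECUTE combination, hence the 8-entry table) into hardware PTE bits with one array lookup instead of decoding bits on every call. A minimal standalone sketch of the same table-driven technique, using hypothetical PROT_*/PTE_* values in place of the real VM_PROT_* and m68k PTE bits:

    #include <stdio.h>

    /* Hypothetical MI protection bits (mirroring VM_PROT_*). */
    #define PROT_READ  0x1
    #define PROT_WRITE 0x2
    #define PROT_EXEC  0x4

    /* Hypothetical hardware PTE protection encodings. */
    #define PTE_INVALID 0x0
    #define PTE_RO      0x1
    #define PTE_RW      0x3

    static int protection_codes[8];

    /* Filled once at bootstrap; every later conversion is one index. */
    static void
    init_protection_codes(void)
    {
        int p;

        for (p = 0; p < 8; p++) {
            if (p == 0)
                protection_codes[p] = PTE_INVALID;
            else if (p & PROT_WRITE)
                protection_codes[p] = PTE_RW;
            else
                protection_codes[p] = PTE_RO;
        }
    }

    #define pte_prot(p) (protection_codes[(p)])

    int
    main(void)
    {
        init_protection_codes();
        printf("R -> %#x, RW -> %#x\n",
            pte_prot(PROT_READ), pte_prot(PROT_READ | PROT_WRITE));
        return 0;
    }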
@@ -271,9 +270,6 @@
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
-#ifdef M68K_MMU_HP
-int pmap_aliasmask; /* separation at which VA aliasing ok */
-#endif
#if defined(M68040) || defined(M68060)
int protostfree; /* prototype (default) free ST map */
#endif
@@ -316,7 +312,8 @@
* Internal routines
*/
void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
-boolean_t pmap_testbit __P((paddr_t, int));
+void pmap_do_remove __P((pmap_t, vaddr_t, vaddr_t, int));
+boolean_t pmap_testbit __P((paddr_t, int));
boolean_t pmap_changebit __P((paddr_t, int, int));
void pmap_enter_ptpage __P((pmap_t, vaddr_t));
void pmap_ptpage_addref __P((vaddr_t));
@@ -443,7 +440,7 @@
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
- addr = (vaddr_t) uvm_km_zalloc(kernel_map, s);
+ addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: can't allocate data structures");
@@ -498,7 +495,7 @@
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
- addr = (vaddr_t) uvm_km_zalloc(kernel_map, s);
+ addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: cannot allocate KPT free list");
s = ptoa(npages);
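The comment above describes carving one contiguous zeroed allocation into page-sized records and threading them onto a singly linked free list. A minimal sketch of that pattern, assuming a hypothetical kpt_page layout and using malloc() where the kernel calls uvm_km_zalloc():

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096    /* stand-in for NBPG */

    /* Hypothetical free-list record occupying the head of each page. */
    struct kpt_page {
        struct kpt_page *kpt_next;
    };

    static struct kpt_page *kpt_free_list;

    /* Split a contiguous allocation into pages and push each one. */
    static int
    kpt_init(size_t npages)
    {
        char *base;
        size_t i;

        base = malloc(npages * PAGE_SIZE);
        if (base == NULL)
            return -1;
        for (i = 0; i < npages; i++) {
            struct kpt_page *kpt =
                (struct kpt_page *)(base + i * PAGE_SIZE);
            kpt->kpt_next = kpt_free_list;
            kpt_free_list = kpt;
        }
        return 0;
    }

    int
    main(void)
    {
        if (kpt_init(4) != 0)
            return 1;
        printf("free list head: %p\n", (void *)kpt_free_list);
        return 0;
    }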
@@ -561,7 +558,10 @@
* use the va's?
*/
#if defined (M68060)
- if (cputype == CPU_68060) {
+#if defined(M68020) || defined(M68030) || defined(M68040)
+ if (cputype == CPU_68060)
+#endif
+ {
struct kpt_page *kptp = kpt_free_list;
while (kptp) {
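This hunk is a common conditional-compilation idiom: when the kernel is configured for the 68060 alone, the runtime cputype test is preprocessed away and the braced block always executes; only a multi-CPU kernel pays for the comparison. A standalone sketch of the idiom, with stand-in option macros and cputype values:

    #include <stdio.h>

    #define M68060 1          /* this "kernel" is 68060-only */
    /* #define M68040 1 */    /* defining this restores the runtime test */

    enum { CPU_68040 = 40, CPU_68060 = 60 };
    static int cputype = CPU_68060;

    int
    main(void)
    {
        (void)cputype;        /* only referenced in multi-CPU builds */
    #if defined(M68060)
    #if defined(M68020) || defined(M68030) || defined(M68040)
        if (cputype == CPU_68060)
    #endif
        {
            printf("68060-specific work runs unconditionally here\n");
        }
    #endif
        return 0;
    }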
@@ -762,8 +762,7 @@
("pmap_create()\n"));
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
-
- bzero(pmap, sizeof(*pmap));
+ memset(pmap, 0, sizeof(*pmap));
pmap_pinit(pmap);
return (pmap);
}
@@ -811,16 +810,17 @@
pmap_destroy(pmap)
pmap_t pmap;
{
+ int count;
+
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
- if (pmap->pm_count == 1) {
+ simple_lock(&pmap->pm_lock);
+ count = --pmap->pm_count;
+ simple_unlock(&pmap->pm_lock);
+ if (count == 0) {
pmap_release(pmap);
pool_put(&pmap_pmap_pool, pmap);
- } else {
- simple_lock(&pmap->pm_lock);
- pmap->pm_count--;
- simple_unlock(&pmap->pm_lock);
- }
+ }
}
/*
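The rewritten pmap_destroy() decrements the reference count under the pmap's simple lock and keeps the decremented value, so exactly one caller — the one that sees the count reach zero — releases the pmap; the old code tested pm_count before taking the lock. A minimal sketch of the pattern, with no-op stand-ins for simple_lock()/simple_unlock():

    #include <stdio.h>

    /* No-op stand-ins for the kernel's simple_lock primitives. */
    struct simplelock { int sl_dummy; };
    static void simple_lock(struct simplelock *l)   { (void)l; }
    static void simple_unlock(struct simplelock *l) { (void)l; }

    struct pmap {
        struct simplelock pm_lock;
        int pm_count;
    };

    /* Drop one reference; only the dropper that observes zero frees. */
    static void
    pmap_destroy_sketch(struct pmap *pmap)
    {
        int count;

        simple_lock(&pmap->pm_lock);
        count = --pmap->pm_count;
        simple_unlock(&pmap->pm_lock);
        if (count == 0)
            printf("last reference gone: release the pmap\n");
    }

    int
    main(void)
    {
        struct pmap p = { { 0 }, 2 };

        pmap_destroy_sketch(&p);    /* 2 -> 1: nothing happens */
        pmap_destroy_sketch(&p);    /* 1 -> 0: released */
        return 0;
    }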
@@ -837,7 +837,9 @@
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
-#ifdef DIAGNOSTIC
+#ifdef notdef /* DIAGNOSTIC */
+ /* count would be 0 from pmap_destroy... */
+ simple_lock(&pmap->pm_lock);
if (pmap->pm_count != 1)
panic("pmap_release count");
#endif
@@ -921,16 +923,22 @@
pmap_t pmap;
vaddr_t sva, eva;
{
+ pmap_do_remove(pmap, sva, eva, 1);
+}
+
+void
+pmap_do_remove(pmap, sva, eva, remove_wired)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+ int remove_wired;
+{
vaddr_t nssva;
pt_entry_t *pte;
- boolean_t firstpage, needcflush;
int flags;
PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
- firstpage = TRUE;
- needcflush = FALSE;
flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
while (sva < eva) {
nssva = x68k_trunc_seg(sva) + X68K_SEG_SIZE;
@@ -939,6 +947,7 @@
/*
* Invalidate every valid mapping within this segment.
+ * If remove_wired is zero, skip the wired pages.
*/
pte = pmap_pte(pmap, sva);
@@ -953,58 +962,15 @@
sva = nssva;
break;
}
- if (pmap_pte_v(pte)) {
-#ifdef M68K_MMU_HP
- if (pmap_aliasmask) {
- /*
- * Purge kernel side of VAC to ensure
- * we get the correct state of any
- * hardware maintained bits.
- */
- if (firstpage) {
- DCIS();
- }
- /*
- * Remember if we may need to
- * flush the VAC due to a non-CI
- * mapping.
- */
- if (!needcflush && !pmap_pte_ci(pte))
- needcflush = TRUE;
- }
-#endif
+ if (pmap_pte_v(pte) &&
+ (remove_wired || !pmap_pte_w(pte))) {
pmap_remove_mapping(pmap, sva, pte, flags);
- firstpage = FALSE;
}
pte++;
sva += NBPG;
}
}
- /*
- * Didn't do anything, no need for cache flushes
- */
- if (firstpage)
- return;
-#ifdef M68K_MMU_HP
- /*
- * In a couple of cases, we don't need to worry about flushing
- * the VAC:
- * 1. if this is a kernel mapping,
- * we have already done it
- * 2. if it is a user mapping not for the current process,
- * it won't be there
- */
- if (pmap_aliasmask && !active_user_pmap(pmap))
- needcflush = FALSE;
- if (needcflush) {
- if (pmap == pmap_kernel()) {
- DCIS();
- } else {
- DCIU();
- }
- }
-#endif
}
/*
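pmap_remove() is now a thin wrapper around pmap_do_remove(), whose extra remove_wired argument lets a caller sweep a range while leaving wired (pinned) mappings intact; the HP-style VAC flushing that x68k hardware never needed is deleted at the same time. A minimal sketch of the skip-wired predicate over a hypothetical flat PTE array:

    #include <stdio.h>

    /* Hypothetical PTE bits: valid and wired. */
    #define PTE_V 0x1
    #define PTE_W 0x2

    /* Clear valid entries; wired ones survive unless remove_wired. */
    static void
    do_remove(unsigned *pte, int n, int remove_wired)
    {
        int i;

        for (i = 0; i < n; i++) {
            if ((pte[i] & PTE_V) &&
                (remove_wired || !(pte[i] & PTE_W)))
                pte[i] = 0;    /* pmap_remove_mapping() in the kernel */
        }
    }

    int
    main(void)
    {
        unsigned ptes[3] = { PTE_V, PTE_V | PTE_W, PTE_V };

        do_remove(ptes, 3, 0);    /* ordinary remove keeps the wired PTE */
        printf("after: %x %x %x\n", ptes[0], ptes[1], ptes[2]);
        return 0;
    }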
@@ -1025,8 +991,7 @@
#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
- printf("pmap_page_protect(%p, %x)\n", pg, prot);
-
+ printf("pmap_page_protect(%lx, %x)\n", pa, prot);
#endif
switch (prot) {
@@ -1080,7 +1045,7 @@
void
pmap_protect(pmap, sva, eva, prot)
pmap_t pmap;
- vaddr_t sva, eva;
+ vaddr_t sva, eva;
vm_prot_t prot;
{
vaddr_t nssva;
@@ -1092,6 +1057,9 @@
("pmap_protect(%p, %lx, %lx, %x)\n",
pmap, sva, eva, prot));
+#ifdef PMAPSTATS
+ protect_stats.calls++;
+#endif
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
@@ -1118,18 +1086,6 @@
pte = pmap_pte(pmap, sva);
while (sva < nssva) {
if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
-#ifdef M68K_MMU_HP
- /*
- * Purge kernel side of VAC to ensure we
- * get the correct state of any hardware
- * maintained bits.
- *
- * XXX do we need to clear the VAC in
- * general to reflect the new protection?
- */
- if (firstpage && pmap_aliasmask)
- DCIS();
-#endif
#if defined(M68040) || defined(M68060)
/*
* Clear caches if making RO (see section
@@ -1276,6 +1232,7 @@
pv = pa_to_pvh(pa);
s = splvm();
+
PMAP_DPRINTF(PDB_ENTER,
("enter: pv at %p: %lx/%p/%p\n",
pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
@@ -1308,49 +1265,6 @@
npv->pv_ptpmap = NULL;
npv->pv_flags = 0;
pv->pv_next = npv;
-#ifdef M68K_MMU_HP
- /*
- * Since there is another logical mapping for the
- * same page we may need to cache-inhibit the
- * descriptors on those CPUs with external VACs.
- * We don't need to CI if:
- *
- * - No two mappings belong to the same user pmaps.
- * Since the cache is flushed on context switches
- * there is no problem between user processes.
- *
- * - Mappings within a single pmap are a certain
- * magic distance apart. VAs at these appropriate
- * boundaries map to the same cache entries or
- * otherwise don't conflict.
- *
- * To keep it simple, we only check for these special
- * cases if there are only two mappings, otherwise we
- * punt and always CI.
- *
- * Note that there are no aliasing problems with the
- * on-chip data-cache when the WA bit is set.
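The comment being deleted here (cut off by the truncation above) describes HP-MMU virtually addressed caches, where two virtual mappings of one physical page are only safe when they land on the same cache index. A minimal sketch of that aliasing test, assuming a hypothetical 16KB VAC; hp300's pmap expressed an equivalent check with a BADALIAS() macro:

    #include <stdio.h>

    /* Hypothetical VAC geometry: the low VA bits index the cache. */
    static unsigned long pmap_aliasmask = 0x3fff;    /* 16KB VAC */

    /* Nonzero when two mappings would occupy different cache lines
     * and therefore need cache-inhibited descriptors. */
    static int
    bad_alias(unsigned long va1, unsigned long va2)
    {
        return ((va1 ^ va2) & pmap_aliasmask) != 0;
    }

    int
    main(void)
    {
        printf("%d %d\n",
            bad_alias(0x10000, 0x14000),    /* same index: fine (0) */
            bad_alias(0x10000, 0x15000));   /* conflict: CI needed (1) */
        return 0;
    }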