Subject: Re: "pmap_unwire: wiring ... didn't change!"
To: None <port-cobalt@NetBSD.org>
From: Izumi Tsutsui <tsutsui@ceres.dti.ne.jp>
List: port-cobalt
Date: 03/21/2005 02:58:40
In article <16930.54901.164252.179323@basis.lke.rad.klinikum.rwth-aachen.de>
kilbi@rad.rwth-aachen.de wrote:
> Just as a followup: Even pure read access still can produce data
> corruption, but much more seldomly than without this patch.
How about this one? (adding cache flush before/after KSEG0 access)
As discussed on port-mips this is not a "real" fix, but IMHO
this could be an acceptable compromise for the current pmap,
which was written for the MIPS1 PIPT cache, until the MI uvm
gets proper changes to handle VIPT caches.
(at least, I'd like to remove mips_sdcache_forceinv)
---
Izumi Tsutsui
Index: include/cache.h
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/include/cache.h,v
retrieving revision 1.7
diff -u -r1.7 cache.h
--- include/cache.h 1 Mar 2005 04:23:44 -0000 1.7
+++ include/cache.h 20 Mar 2005 17:41:51 -0000
@@ -185,8 +185,6 @@
extern int mips_scache_unified;
-extern u_int mips_sdcache_forceinv; /* force pmap to invalidate for r5ksc */
-
/* TERTIARY CACHE VARIABLES */
extern u_int mips_tcache_size; /* always unified */
extern u_int mips_tcache_line_size;
@@ -201,6 +199,8 @@
extern u_int mips_cache_alias_mask;
extern u_int mips_cache_prefer_mask;
+extern int mips_cache_virtual_alias;
+
/*
* XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
*/
Index: include/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/include/pmap.h,v
retrieving revision 1.46
diff -u -r1.46 pmap.h
--- include/pmap.h 17 Jan 2005 04:54:14 -0000 1.46
+++ include/pmap.h 20 Mar 2005 17:41:51 -0000
@@ -180,8 +180,10 @@
/*
* Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
*/
-#define PMAP_MAP_POOLPAGE(pa) MIPS_PHYS_TO_KSEG0((pa))
-#define PMAP_UNMAP_POOLPAGE(va) MIPS_KSEG0_TO_PHYS((va))
+vaddr_t mips_pmap_map_poolpage(paddr_t);
+paddr_t mips_pmap_unmap_poolpage(vaddr_t);
+#define PMAP_MAP_POOLPAGE(pa) mips_pmap_map_poolpage(pa)
+#define PMAP_UNMAP_POOLPAGE(va) mips_pmap_unmap_poolpage(va)
/*
* Other hooks for the pool allocator.
Index: mips/cache.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/cache.c,v
retrieving revision 1.26
diff -u -r1.26 cache.c
--- mips/cache.c 1 Mar 2005 04:23:44 -0000 1.26
+++ mips/cache.c 20 Mar 2005 17:41:51 -0000
@@ -129,8 +129,6 @@
int mips_scache_unified;
-u_int mips_sdcache_forceinv = 0;
-
/* TERTIARY CACHE VARIABLES */
u_int mips_tcache_size; /* always unified */
u_int mips_tcache_line_size;
@@ -154,6 +152,8 @@
u_int mips_cache_alias_mask; /* for virtually-indexed caches */
u_int mips_cache_prefer_mask;
+int mips_cache_virtual_alias;
+
struct mips_cache_ops mips_cache_ops;
#ifdef MIPS1
@@ -421,6 +421,11 @@
mips3_get_cache_config(csizebase);
+ if (mips_picache_size > PAGE_SIZE ||
+ mips_pdcache_size > PAGE_SIZE)
+ /* no VCE support if there is no L2 cache */
+ mips_cache_virtual_alias = 1;
+
switch (mips_picache_line_size) {
case 16:
mips_cache_ops.mco_icache_sync_all =
@@ -494,6 +499,10 @@
mips3_get_cache_config(csizebase);
+ if (mips_picache_size > PAGE_SIZE ||
+ mips_pdcache_size > PAGE_SIZE)
+ mips_cache_virtual_alias = 1;
+
switch (mips_picache_line_size) {
case 32:
mips_cache_ops.mco_icache_sync_all =
@@ -590,6 +599,7 @@
~(PAGE_SIZE - 1);
mips_cache_prefer_mask =
max(mips_pdcache_size, mips_picache_size) - 1;
+ mips_cache_virtual_alias = 1;
/* cache ops */
mips_cache_ops.mco_icache_sync_all =
r5900_icache_sync_all_64;
@@ -619,6 +629,8 @@
mips4_get_cache_config(csizebase);
+ /* VCE is handled by hardware */
+
mips_cache_ops.mco_icache_sync_all =
r10k_icache_sync_all;
mips_cache_ops.mco_icache_sync_range =
@@ -669,14 +681,22 @@
switch (MIPS_PRID_IMPL(cpu_id)) {
#if defined(MIPS3) || defined(MIPS4)
case MIPS_R4000:
+#if 0
/*
* R4000/R4400 always detects virtual alias as if
* primary cache size is 32KB. Actual primary cache size
* is ignored wrt VCED/VCEI.
*/
+ /*
+ * XXX
+ * It's still better to avoid virtual alias even with VCE,
+ * isn't it?
+ */
mips_cache_alias_mask =
(MIPS3_MAX_PCACHE_SIZE - 1) & ~(PAGE_SIZE - 1);
mips_cache_prefer_mask = MIPS3_MAX_PCACHE_SIZE - 1;
+#endif
+ mips_cache_virtual_alias = 0;
/* FALLTHROUGH */
case MIPS_R4600:
#ifdef ENABLE_MIPS_R4700
Index: mips/mem.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/mem.c,v
retrieving revision 1.29
diff -u -r1.29 mem.c
--- mips/mem.c 7 Aug 2003 16:28:33 -0000 1.29
+++ mips/mem.c 20 Mar 2005 17:41:51 -0000
@@ -76,6 +76,9 @@
* Memory special file
*/
+#include "opt_cputype.h"
+#include "opt_mips_cache.h"
+
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mem.c,v 1.29 2003/08/07 16:28:33 agc Exp $");
@@ -89,6 +92,8 @@
#include <machine/cpu.h>
+#include <mips/cache.h>
+
#include <uvm/uvm_extern.h>
extern paddr_t avail_end;
@@ -142,6 +147,8 @@
return (EFAULT);
v += MIPS_KSEG0_START;
error = uiomove((void *)v, c, uio);
+ if (mips_cache_virtual_alias)
+ mips_dcache_wbinv_range(v, c);
continue;
case DEV_KMEM:
@@ -156,6 +163,8 @@
uio->uio_rw == UIO_READ ? B_READ : B_WRITE)))
return (EFAULT);
error = uiomove((void *)v, c, uio);
+ if (mips_cache_virtual_alias)
+ mips_dcache_wbinv_range(v, c);
continue;
case DEV_NULL:
Index: mips/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/pmap.c,v
retrieving revision 1.157
diff -u -r1.157 pmap.c
--- mips/pmap.c 1 Mar 2005 04:23:44 -0000 1.157
+++ mips/pmap.c 20 Mar 2005 17:41:52 -0000
@@ -643,8 +643,8 @@
* is reused with KSEG2 (mapped) addresses. This may
* cause problems on machines without secondary caches.
*/
- if (MIPS_HAS_R4K_MMU)
- mips_dcache_wbinv_range((vaddr_t) pte,
+ if (mips_cache_virtual_alias)
+ mips_dcache_inv_range((vaddr_t) pte,
PAGE_SIZE);
#endif /* MIPS3_PLUS */
uvm_pagefree(PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(pte)));
@@ -1219,11 +1219,17 @@
#endif
if (pmap == pmap_kernel()) {
- if (need_enter_pv)
- pmap_enter_pv(pmap, va, pa, &npte);
-
/* enter entries into kernel pmap */
pte = kvtopte(va);
+ if (mips_pg_v(pte->pt_entry) &&
+ mips_tlbpfn_to_paddr(pte->pt_entry) != pa) {
+ pmap_remove(pmap, va, va + NBPG);
+#ifdef DEBUG
+ enter_stats.mchange++;
+#endif
+ }
+ if (need_enter_pv)
+ pmap_enter_pv(pmap, va, pa, &npte);
if (MIPS_HAS_R4K_MMU)
npte |= mips_paddr_to_tlbpfn(pa) | MIPS3_PG_G;
@@ -1234,13 +1240,7 @@
pmap->pm_stats.wired_count++;
npte |= mips_pg_wired_bit();
}
- if (mips_pg_v(pte->pt_entry) &&
- mips_tlbpfn_to_paddr(pte->pt_entry) != pa) {
- pmap_remove(pmap, va, va + NBPG);
-#ifdef DEBUG
- enter_stats.mchange++;
-#endif
- }
+
if (!mips_pg_v(pte->pt_entry))
pmap->pm_stats.resident_count++;
pte->pt_entry = npte;
@@ -1274,12 +1274,19 @@
#endif
}
+ pte += (va >> PGSHIFT) & (NPTEPG - 1);
+ if (mips_pg_v(pte->pt_entry) &&
+ mips_tlbpfn_to_paddr(pte->pt_entry) != pa) {
+ pmap_remove(pmap, va, va + NBPG);
+#ifdef DEBUG
+ enter_stats.mchange++;
+#endif
+ }
+
/* Done after case that may sleep/return. */
if (need_enter_pv)
pmap_enter_pv(pmap, va, pa, &npte);
- pte += (va >> PGSHIFT) & (NPTEPG - 1);
-
/*
* Now validate mapping with desired protection/wiring.
* Assume uniform modified and referenced status for all
@@ -1318,13 +1325,6 @@
#endif
asid = pmap->pm_asid << MIPS_TLB_PID_SHIFT;
- if (mips_pg_v(pte->pt_entry) &&
- mips_tlbpfn_to_paddr(pte->pt_entry) != pa) {
- pmap_remove(pmap, va, va + NBPG);
-#ifdef DEBUG
- enter_stats.mchange++;
-#endif
- }
if (!mips_pg_v(pte->pt_entry))
pmap->pm_stats.resident_count++;
@@ -1577,6 +1577,11 @@
pmap_zero_page(phys)
paddr_t phys;
{
+ vaddr_t va;
+#if defined(MIPS3_PLUS)
+ pv_entry_t pv;
+#endif
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_zero_page(%lx)\n", (u_long)phys);
@@ -1585,8 +1590,18 @@
if (! (phys < MIPS_MAX_MEM_ADDR))
printf("pmap_zero_page(%lx) nonphys\n", (u_long)phys);
#endif
+ va = MIPS_PHYS_TO_KSEG0(phys);
+
+#if defined(MIPS3_PLUS) /* XXX mmu XXX */
+ pv = pa_to_pvh(phys);
+ if (MIPS_HAS_R4K_MMU &&
+ (pv->pv_flags & PV_UNCACHED) == 0 &&
+ mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va)) {
+ mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
+ }
+#endif
- mips_pagezero((caddr_t)MIPS_PHYS_TO_KSEG0(phys));
+ mips_pagezero((caddr_t)va);
#if defined(MIPS3_PLUS) /* XXX mmu XXX */
/*
@@ -1598,9 +1613,8 @@
*
* XXXJRT This is totally disgusting.
*/
- if (MIPS_HAS_R4K_MMU &&
- ( (mips_sdcache_line_size == 0) || (mips_sdcache_forceinv) ) )
- mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(phys), NBPG);
+ if (MIPS_HAS_R4K_MMU) /* XXX VCED on kernel stack is not allowed */
+ mips_dcache_wbinv_range(va, PAGE_SIZE);
#endif /* MIPS3_PLUS */
}
@@ -1636,11 +1650,12 @@
* It would probably be better to map the destination as a
* write-through no allocate to reduce cache thrash.
*/
- if (MIPS_HAS_R4K_MMU &&
- ( (mips_sdcache_line_size == 0) || (mips_sdcache_forceinv)) ) {
+ if (mips_cache_virtual_alias) {
/*XXX FIXME Not very sophisticated */
mips_flushcache_allpvh(src);
-/* mips_flushcache_allpvh(dst); */
+#if 0
+ mips_flushcache_allpvh(dst);
+#endif
}
#endif /* MIPS3_PLUS */
@@ -1659,10 +1674,9 @@
*
* XXXJRT -- This is totally disgusting.
*/
- if (MIPS_HAS_R4K_MMU &&
- ( (mips_sdcache_line_size == 0) || (mips_sdcache_forceinv)) ) {
- mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(src), NBPG);
- mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(dst), NBPG);
+ if (mips_cache_virtual_alias) {
+ mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(src), PAGE_SIZE);
+ mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(dst), PAGE_SIZE);
}
#endif /* MIPS3_PLUS */
}
@@ -1891,7 +1905,7 @@
pv->pv_next = NULL;
} else {
#if defined(MIPS3_PLUS) /* XXX mmu XXX */
- if (MIPS_HAS_R4K_MMU && mips_sdcache_line_size == 0) {
+ if (mips_cache_virtual_alias) {
/*
* There is at least one other VA mapping this page.
* Check if they are cache index compatible.
@@ -2089,7 +2103,7 @@
pmap_page_cache(pa, 0);
}
#endif
- if (MIPS_HAS_R4K_MMU && last != 0)
+ if (last != 0 && mips_cache_virtual_alias)
mips_dcache_wbinv_range_index(va, PAGE_SIZE);
#endif /* MIPS3_PLUS */
}
@@ -2103,12 +2117,27 @@
pmap_pv_page_alloc(struct pool *pp, int flags)
{
struct vm_page *pg;
+ paddr_t phys;
+#if defined(MIPS3_PLUS)
+ pv_entry_t pv;
+#endif
+ vaddr_t va;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg == NULL) {
return NULL;
}
- return ((void *)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)));
+ phys = VM_PAGE_TO_PHYS(pg);
+ va = MIPS_PHYS_TO_KSEG0(phys);
+#if defined(MIPS3_PLUS)
+ pv = pa_to_pvh(phys);
+ if (mips_cache_virtual_alias) {
+ if ((pv->pv_flags & PV_UNCACHED) == 0 &&
+ mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
+ mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
+ }
+#endif
+ return ((void *)va);
}
/*
@@ -2119,6 +2148,11 @@
void
pmap_pv_page_free(struct pool *pp, void *v)
{
+
+#ifdef MIPS3_PLUS
+ if (mips_cache_virtual_alias)
+ mips_dcache_inv_range((vaddr_t)v, PAGE_SIZE);
+#endif
uvm_pagefree(PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS((vaddr_t)v)));
}
@@ -2165,3 +2199,37 @@
/******************** page table page management ********************/
/* TO BE DONE */
+
+vaddr_t
+mips_pmap_map_poolpage(paddr_t pa)
+{
+ vaddr_t va;
+#if defined(MIPS3_PLUS)
+ pv_entry_t pv;
+#endif
+
+ va = MIPS_PHYS_TO_KSEG0(pa);
+#if defined(MIPS3_PLUS)
+ pv = pa_to_pvh(pa);
+ if (mips_cache_virtual_alias) {
+ if ((pv->pv_flags & PV_UNCACHED) == 0 &&
+ mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
+ mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
+ }
+#endif
+ return va;
+}
+
+paddr_t
+mips_pmap_unmap_poolpage(vaddr_t va)
+{
+ paddr_t pa;
+
+ pa = MIPS_KSEG0_TO_PHYS(va);
+#if defined(MIPS3_PLUS)
+ if (mips_cache_virtual_alias) {
+ mips_dcache_inv_range(va, PAGE_SIZE);
+ }
+#endif
+ return pa;
+}
Index: mips/vm_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/vm_machdep.c,v
retrieving revision 1.105
diff -u -r1.105 vm_machdep.c
--- mips/vm_machdep.c 1 Jan 2005 03:25:46 -0000 1.105
+++ mips/vm_machdep.c 20 Mar 2005 17:41:52 -0000
@@ -131,18 +131,6 @@
pt_entry_t *pte;
int i, x;
-#ifdef MIPS3_PLUS
- /*
- * To eliminate virtual aliases created by pmap_zero_page(),
- * this cache flush operation is necessary.
- * VCED on kernel stack is not allowed.
- * XXXJRT Confirm that this is necessry, and/or fix
- * XXXJRT pmap_zero_page().
- */
- if (CPUISMIPS3 && mips_sdcache_line_size)
- mips_dcache_wbinv_range((vaddr_t) l2->l_addr, USPACE);
-#endif
-
#ifdef DIAGNOSTIC
/*
* If l1 != curlwp && l1 == &lwp0, we're creating a kernel thread.
@@ -333,7 +321,8 @@
if (pmap_extract(upmap, uva, &pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(vm_map_pmap(phys_map), kva, pa,
- VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
len -= PAGE_SIZE;