Subject: Re: Using different cache modes for r/o vs r/w pages
To: None <Richard.Earnshaw@arm.com>
From: Jason R Thorpe <thorpej@wasabisystems.com>
List: port-arm
Date: 01/30/2002 15:21:16
--NHfequSh1hmJPP0s
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline

On Wed, Jan 30, 2002 at 12:08:05PM +0000, Richard Earnshaw wrote:

 > Given your need to recalculate the cacheable bits here, the above approach 
 > may no-longer be the best.  It might be the case that we should now make 
 > the cacheability of a page based on the current PTE writable attribute 
 > rather than the UVM attributes for the page.

Ok.  Here's a rough patch (kernel not booted yet) that still needs some
improvement.  XXXJRT is next to stuff that needs to change.

Basically, now there's prototype PTE arrays.  All the interfaces that
currently take PTE bits need to start taking actual attribute bits
instead.  I need to add L1 section prototypes yet, as well, since
they, too, have extended cache modes on the XScale.

So, when a PTE is frobbed, it's now frobbed like:

	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap, new_prot, new_cache);

This allows me to set up the correct L2 descriptor type at boot time, and
eliminates the need for pte_cache_bits, as well.

I'll boot it after I get back from the gym.

-- 
        -- Jason R. Thorpe <thorpej@wasabisystems.com>

--NHfequSh1hmJPP0s
Content-Type: text/plain; charset=us-ascii
Content-Description: pmap-cache-take2
Content-Disposition: attachment; filename=foo

Index: arm/arm/cpufunc.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm/cpufunc.c,v
retrieving revision 1.29
diff -c -r1.29 cpufunc.c
*** arm/arm/cpufunc.c	2002/01/30 00:37:18	1.29
--- arm/arm/cpufunc.c	2002/01/30 23:14:12
***************
*** 53,58 ****
--- 53,61 ----
  #include <sys/types.h>
  #include <sys/param.h>
  #include <sys/systm.h>
+ 
+ #include <uvm/uvm_extern.h>
+ 
  #include <machine/cpu.h>
  #include <machine/bootconfig.h>
  #include <arch/arm/arm/disassem.h>
***************
*** 650,655 ****
--- 653,659 ----
  		cpu_reset_needs_v4_MMU_disable = 0;
  		/* XXX Cache info? */
  		arm_dcache_align_mask = -1;
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	/* CPU_ARM6 */
***************
*** 661,666 ****
--- 665,671 ----
  		cpu_reset_needs_v4_MMU_disable = 0;
  		/* XXX Cache info? */
  		arm_dcache_align_mask = -1;
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	/* CPU_ARM7 */
***************
*** 671,676 ****
--- 676,682 ----
  		cpufuncs = arm7tdmi_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 0;
  		get_cachetype();
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	
***************
*** 680,694 ****
  		cpufuncs = arm8_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
  		get_cachetype();
  		return 0;
  	}
  #endif	/* CPU_ARM8 */
  #ifdef CPU_ARM9
  	if (cputype == CPU_ID_ARM920T) {
- 		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
  		cpufuncs = arm9_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
  		get_cachetype();
  		return 0;
  	}
  #endif /* CPU_ARM9 */
--- 686,701 ----
  		cpufuncs = arm8_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
  		get_cachetype();
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	/* CPU_ARM8 */
  #ifdef CPU_ARM9
  	if (cputype == CPU_ID_ARM920T) {
  		cpufuncs = arm9_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
  		get_cachetype();
+ 		pmap_pte_protos_init_arm9();
  		return 0;
  	}
  #endif /* CPU_ARM9 */
***************
*** 698,703 ****
--- 705,711 ----
  		cpufuncs = sa110_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
  		get_cachetype();
+ 		pmap_pte_protos_init_arm678();		/* XXX */
  		/*
  		 * Enable the right variant of sleeping.
  		 */
***************
*** 743,749 ****
  			:
  			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
  
- 		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
  		cpufuncs = xscale_cpufuncs;
  
  		/*
--- 751,756 ----
***************
*** 758,763 ****
--- 765,771 ----
  
  		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
  		get_cachetype();
+ 		pmap_pte_protos_init_xscale();
  		return 0;
  	}
  #endif /* CPU_XSCALE */
Index: arm/arm32/arm32_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/arm32_machdep.c,v
retrieving revision 1.11
diff -c -r1.11 arm32_machdep.c
*** arm/arm32/arm32_machdep.c	2002/01/20 03:41:47	1.11
--- arm/arm32/arm32_machdep.c	2002/01/30 23:14:13
***************
*** 168,174 ****
  
  	if (cacheable)
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
! 		    L1_SEC((pa & PD_MASK), pte_cache_mode);
  	else
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
  		    L1_SEC((pa & PD_MASK), 0);
--- 168,174 ----
  
  	if (cacheable)
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
! 		    L1_SEC((pa & PD_MASK), (PT_B|PT_C)); /* XXXJRT */
  	else
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
  		    L1_SEC((pa & PD_MASK), 0);
***************
*** 255,264 ****
  #ifdef VERBOSE_INIT_ARM
  			printf("P");
  #endif
  #ifndef cats			
! 			l2pt[((va >> PGSHIFT) & 0x3ff)] = L2_SPTE(pa, acc, flg);
  #else
! 			l2pt[((va >> PGSHIFT) & 0x7ff)] = L2_SPTE(pa, acc, flg);
  #endif
  			va += NBPG;
  			pa += NBPG;
--- 255,267 ----
  #ifdef VERBOSE_INIT_ARM
  			printf("P");
  #endif
+ 			/* XXXJRT Clean me up */
  #ifndef cats			
! 			l2pt[((va >> PGSHIFT) & 0x3ff)] =
! 			    pa | PT_AP(acc) | flg | L2_SPAGE;
  #else
! 			l2pt[((va >> PGSHIFT) & 0x7ff)] =
! 			    pa | PT_AP(acc) | flg | L2_SPAGE;
  #endif
  			va += NBPG;
  			pa += NBPG;
***************
*** 280,289 ****
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KRW);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KRW);
  #endif	
  }
  
--- 283,296 ----
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_CACHE);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_CACHE);
  #endif	
  }
  
***************
*** 296,305 ****
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
  #endif
  }
  
--- 303,316 ----
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_NOCACHE);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_NOCACHE);
  #endif
  }
  
***************
*** 312,321 ****
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KR);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KR);
  #endif
  }
  
--- 323,336 ----
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ,
! 					PTE_PROTO_CACHE);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ,
! 					PTE_PROTO_CACHE);
  #endif
  }
  
***************
*** 486,493 ****
  void
  zero_page_readonly()
  {
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    L2_PTE((systempage.pv_pa & PG_FRAME), AP_KR));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
--- 501,512 ----
  void
  zero_page_readonly()
  {
+ 
+ 	/* XXXJRT Do we really care about caching page0?! */
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    systempage.pv_pa | pte_proto(PTE_PROTO_KERNEL,
! 					 VM_PROT_READ,
! 					 PTE_PROTO_CACHE));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
***************
*** 502,509 ****
  void
  zero_page_readwrite()
  {
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    L2_PTE((systempage.pv_pa & PG_FRAME), AP_KRW));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
--- 521,532 ----
  void
  zero_page_readwrite()
  {
+ 
+ 	/* XXXJRT See above. */
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    systempage.pv_pa | pte_proto(PTE_PROTO_KERNEL,
! 					 VM_PROT_READ|VM_PROT_WRITE,
! 					 PTE_PROTO_CACHE));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
Index: arm/arm32/bus_dma.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/bus_dma.c,v
retrieving revision 1.8
diff -c -r1.8 bus_dma.c
*** arm/arm32/bus_dma.c	2002/01/25 20:57:41	1.8
--- arm/arm32/bus_dma.c	2002/01/30 23:14:13
***************
*** 547,553 ****
  				cpu_dcache_wbinv_range(va, NBPG);
  				cpu_drain_writebuf();
  				ptep = vtopte(va);
! 				*ptep = ((*ptep) & (~PT_C | PT_B));
  				tlb_flush();
  			}
  #ifdef DEBUG_DMA
--- 547,556 ----
  				cpu_dcache_wbinv_range(va, NBPG);
  				cpu_drain_writebuf();
  				ptep = vtopte(va);
! 				*ptep = (*ptep & PG_FRAME) |
! 				    pmap_pte_proto(pmap_kernel(),
! 						   VM_PROT_READ|VM_PROT_WRITE,
! 						   PTE_PROTO_NOCACHE);
  				tlb_flush();
  			}
  #ifdef DEBUG_DMA
Index: arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.36
diff -c -r1.36 pmap.c
*** arm/arm32/pmap.c	2002/01/25 19:19:25	1.36
--- arm/arm32/pmap.c	2002/01/30 23:14:17
***************
*** 193,198 ****
--- 193,199 ----
  extern caddr_t msgbufaddr;
  
  boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
+ 
  /*
   * locking data structures
   */
***************
*** 326,336 ****
      pt_entry_t *, boolean_t));
  
  /*
!  * Cache enable bits in PTE to use on pages that are cacheable.
!  * On most machines this is cacheable/bufferable, but on some, eg arm10, we
!  * can chose between write-through and write-back cacheing.
   */
! pt_entry_t pte_cache_mode = (PT_C | PT_B);
  
  /*
   * real definition of pv_entry.
--- 327,335 ----
      pt_entry_t *, boolean_t));
  
  /*
!  * Prototype PTE array.  These are initialized in pmap_pte_protos_init_*().
   */
! pt_entry_t pte_protos[4][8];
  
  /*
   * real definition of pv_entry.
***************
*** 943,954 ****
  	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
  
  	PDEBUG(0, printf("pt self reference %lx in %lx\n",
! 	    L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
  
  	/* Map the page table into the page table area. */
  	if (selfref) {
  		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
! 			L2_PTE_NC_NB(l2pa, AP_KRW);
  	}
  	/* XXX should be a purge */
  /*	cpu_tlb_flushD();*/
--- 942,957 ----
  	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
  
  	PDEBUG(0, printf("pt self reference %lx in %lx\n",
! 	    l2pa | pmap_pte_proto(pmap_kernel(),
! 				  VM_PROT_READ|VM_PROT_WRITE,
! 				  PTE_PROTO_NOCACHE)));
  
  	/* Map the page table into the page table area. */
  	if (selfref) {
  		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
! 		    l2pa | pmap_pte_proto(pmap_kernel(),
! 					  VM_PROT_READ|VM_PROT_WRITE,
! 					  PTE_PROTO_NOCACHE);
  	}
  	/* XXX should be a purge */
  /*	cpu_tlb_flushD();*/
***************
*** 1392,1398 ****
  
  		/* Revoke cacheability and bufferability */
  		/* XXX should be done better than this */
! 		ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
  
  		va += NBPG;
  		m = m->pageq.tqe_next;
--- 1395,1405 ----
  
  		/* Revoke cacheability and bufferability */
  		/* XXX should be done better than this */
! 		ptes[arm_byte_to_page(va)] =
! 		    (ptes[arm_byte_to_page(va)] & PG_FRAME) |
! 		    pmap_pte_proto(pmap_kernel(),
! 				   VM_PROT_READ|VM_PROT_WRITE,
! 				   PTE_PROTO_NOCACHE);
  
  		va += NBPG;
  		m = m->pageq.tqe_next;
***************
*** 1506,1512 ****
  	/* Revoke cacheability and bufferability */
  	/* XXX should be done better than this */
  	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
! 	*pte = *pte & ~(PT_C | PT_B);
  
  	/* Wire in this page table */
  	pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
--- 1513,1521 ----
  	/* Revoke cacheability and bufferability */
  	/* XXX should be done better than this */
  	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
! 	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap_kernel(),
! 						  VM_PROT_READ|VM_PROT_WRITE,
! 						  PTE_PROTO_NOCACHE);
  
  	/* Wire in this page table */
  	pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
***************
*** 1878,1887 ****
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_cpwait();
  	bzero_page(page_hook0.va);
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  }
  
--- 1887,1903 ----
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	KDASSERT((phys & PG_FRAME) == phys);
! 	*page_hook0.pte = phys |
! 	    pmap_pte_proto(pmap_kernel(),
! 			   VM_PROT_READ|VM_PROT_WRITE,
! 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook0.va);
+ 
  	cpu_cpwait();
+ 
  	bzero_page(page_hook0.va);
+ 
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  }
  
***************
*** 1910,1916 ****
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_cpwait();
  
--- 1926,1936 ----
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	KDASSERT((phys & PG_FRAME) == phys);
! 	*page_hook0.pte = phys |
! 	    pmap_pte_proto(pmap_kernel(),
! 			   VM_PROT_READ|VM_PROT_WRITE,
! 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_cpwait();
  
***************
*** 1971,1982 ****
  	 * the cache for the appropriate page. Invalidate the TLB
  	 * as required.
  	 */
! 	*page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
! 	*page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_tlb_flushD_SE(page_hook1.va);
  	cpu_cpwait();
  	bcopy_page(page_hook0.va, page_hook1.va);
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  	cpu_dcache_wbinv_range(page_hook1.va, NBPG);
  }
--- 1991,2014 ----
  	 * the cache for the appropriate page. Invalidate the TLB
  	 * as required.
  	 */
! 	KDASSERT((src & PG_FRAME) == src);
! 	*page_hook0.pte = src |		/* XXX should be r/o */
! 	    pmap_pte_proto(pmap_kernel(),
! 			   VM_PROT_READ|VM_PROT_WRITE,
! 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook0.va);
+ 
+ 		KDASSERT((dest & PG_FRAME) == dest);
+ 	*page_hook1.pte = dest |
+ 	    pmap_pte_proto(pmap_kernel(),
+ 			   VM_PROT_READ|VM_PROT_WRITE,
+ 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook1.va);
+ 
  	cpu_cpwait();
+ 
  	bcopy_page(page_hook0.va, page_hook1.va);
+ 
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  	cpu_dcache_wbinv_range(page_hook1.va, NBPG);
  }
***************
*** 2194,2199 ****
--- 2226,2232 ----
  	int cacheable_entries = 0;
  	int kern_cacheable = 0;
  	int other_writable = 0;
+ 	int prot;
  
  	pv = pvh->pvh_list;
  	KASSERT(ptes != NULL);
***************
*** 2237,2248 ****
  		if (cacheable_entries == 0)
  		    return;
  		for (npv = pv; npv; npv = npv->pv_next) {
! 			if ((pmap == npv->pv_pmap 
! 			    || kpmap == npv->pv_pmap) && 
  			    (npv->pv_flags & PT_NC) == 0) {
! 				ptes[arm_byte_to_page(npv->pv_va)] &= 
! 				    ~(PT_C | PT_B);
!  				npv->pv_flags |= PT_NC;
  				/*
  				 * If this page needs flushing from the
  				 * cache, and we aren't going to do it
--- 2270,2281 ----
  		if (cacheable_entries == 0)
  		    return;
  		for (npv = pv; npv; npv = npv->pv_next) {
! 			if ((pmap == npv->pv_pmap ||
! 			     kpmap == npv->pv_pmap) && 
  			    (npv->pv_flags & PT_NC) == 0) {
! 				prot = (npv->pv_flags & PT_Wr) ?
! 				    VM_PROT_READ | VM_PROT_WRITE :
! 				    VM_PROT_READ;
  				/*
  				 * If this page needs flushing from the
  				 * cache, and we aren't going to do it
***************
*** 2256,2261 ****
--- 2289,2299 ----
  					    NBPG);
  					cpu_tlb_flushID_SE(npv->pv_va);
  				}
+ 				ptes[arm_byte_to_page(npv->pv_va)] =
+ 				    (ptes[arm_byte_to_page(npv->pv_va)] &
+ 				     PG_FRAME) |
+ 				    pmap_pte_proto(npv->pv_pmap, prot,
+ 						   PTE_PROTO_NOCACHE);
+  				npv->pv_flags |= PT_NC;
  			}
  		}
  		if ((clear_cache && cacheable_entries >= 4) ||
***************
*** 2273,2280 ****
  			if ((pmap == npv->pv_pmap ||
  			    (kpmap == npv->pv_pmap && other_writable == 0)) && 
  			    (npv->pv_flags & PT_NC)) {
! 				ptes[arm_byte_to_page(npv->pv_va)] |=
! 				    pte_cache_mode;
  				npv->pv_flags &= ~PT_NC;
  			}
  		}
--- 2311,2323 ----
  			if ((pmap == npv->pv_pmap ||
  			    (kpmap == npv->pv_pmap && other_writable == 0)) && 
  			    (npv->pv_flags & PT_NC)) {
! 				prot = (npv->pv_flags & PT_Wr) ?
! 				    VM_PROT_READ | VM_PROT_WRITE :
! 				    VM_PROT_READ;
! 				ptes[arm_byte_to_page(npv->pv_va)] =
! 				    (ptes[arm_byte_to_page(npv->pv_va)] &
! 				     PG_FRAME) |
! 				    pmap_pte_proto(npv->pv_pmap, prot,
! 						   PTE_PROTO_CACHE);
  				npv->pv_flags &= ~PT_NC;
  			}
  		}
***************
*** 2818,2850 ****
  #endif
  
  	/* Construct the pte, giving the correct access. */
! 	npte = (pa & PG_FRAME);
! 
! 	/* VA 0 is magic. */
! 	if (pmap != pmap_kernel() && va != 0)
! 		npte |= PT_AP(AP_U);
  
  	if (pmap_initialized && bank != -1) {
  #ifdef DIAGNOSTIC
  		if ((flags & VM_PROT_ALL) & ~prot)
  			panic("pmap_enter: access_type exceeds prot");
  #endif
! 		npte |= pte_cache_mode;
  		if (flags & VM_PROT_WRITE) {
! 			npte |= L2_SPAGE | PT_AP(AP_W);
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
  		} else if (flags & VM_PROT_ALL) {
! 			npte |= L2_SPAGE;
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H;
! 		} else
! 			npte |= L2_INVAL;
  	} else {
! 		if (prot & VM_PROT_WRITE)
! 			npte |= L2_SPAGE | PT_AP(AP_W);
! 		else if (prot & VM_PROT_ALL)
! 			npte |= L2_SPAGE;
! 		else
! 			npte |= L2_INVAL;
  	}
  
  #ifdef MYCROFT_HACK
--- 2861,2906 ----
  #endif
  
  	/* Construct the pte, giving the correct access. */
! 	KDASSERT((pa & PG_FRAME) == pa);
! 	npte = pa;
  
+ 	/*
+ 	 * VA 0 is magic; that's where the vector page is.  User pmaps
+ 	 * always need to see an un-cached view of this page (which they
+ 	 * would anyway, since it's not in the managed page pool, so there
+ 	 * is no need to check for it).
+ 	 */
  	if (pmap_initialized && bank != -1) {
+ 		KDASSERT(va != 0);
  #ifdef DIAGNOSTIC
  		if ((flags & VM_PROT_ALL) & ~prot)
  			panic("pmap_enter: access_type exceeds prot");
  #endif
! 		/*
! 		 * XXXJRT -- consider optimization potential.
! 		 * C.f. Alpha pmap.
! 		 */
  		if (flags & VM_PROT_WRITE) {
! 			npte |= pmap_pte_proto(pmap,
! 					       VM_PROT_READ|VM_PROT_WRITE,
! 					       PTE_PROTO_CACHE);
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
  		} else if (flags & VM_PROT_ALL) {
! 			npte |= pmap_pte_proto(pmap,
! 					       VM_PROT_READ,
! 					       PTE_PROTO_CACHE);
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H;
! 		}
! 		/*
! 		 * ...else we want to take a fault, so don't do anything
! 		 * to the PTE here.
! 		 */
  	} else {
! 		/*
! 		 * Non-managed pages entered via this interface
! 		 * are implicitly un-cached.
! 		 */
! 		npte |= pmap_pte_proto(pmap, prot, PTE_PROTO_NOCACHE);
  	}
  
  #ifdef MYCROFT_HACK
***************
*** 2920,2926 ****
  	}
  	pte = vtopte(va);
  	KASSERT(!pmap_pte_v(pte));
! 	*pte = L2_PTE(pa, AP_KRW);
  }
  
  void
--- 2976,2989 ----
  	}
  	pte = vtopte(va);
  	KASSERT(!pmap_pte_v(pte));
! #if 1 /* XXX */
! 	*pte = pa | pmap_pte_proto(pmap_kernel(),
! 				   VM_PROT_READ|VM_PROT_WRITE,
! 				   PTE_PROTO_CACHE);
! #else
! 	*pte = pa | pmap_pte_proto(pmap_kernel(), prot,
! 				   PTE_PROTO_CACHE);
! #endif
  }
  
  void
***************
*** 3357,3363 ****
  		pte = pmap_pte(pv->pv_pmap, va);
  		KASSERT(pte != NULL);
  		if (maskbits & (PT_Wr|PT_M)) {
! 			if ((pv->pv_flags & PT_NC)) {
  				/* 
  				 * Entry is not cacheable: reenable
  				 * the cache, nothing to flush
--- 3420,3426 ----
  		pte = pmap_pte(pv->pv_pmap, va);
  		KASSERT(pte != NULL);
  		if (maskbits & (PT_Wr|PT_M)) {
! 			if (pv->pv_flags & PT_NC) {
  				/* 
  				 * Entry is not cacheable: reenable
  				 * the cache, nothing to flush
***************
*** 3375,3406 ****
  				 *
  				 */
  				if (maskbits & PT_Wr) {
! 					*pte |= pte_cache_mode;
  					pv->pv_flags &= ~PT_NC;
  				}
! 			} else if (pmap_is_curpmap(pv->pv_pmap))
! 				/* 
  				 * Entry is cacheable: check if pmap is
! 				 * current if it is flush it,
! 				 * otherwise it won't be in the cache
  				 */
  				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
  
! 			/* make the pte read only */
! 			*pte &= ~PT_AP(AP_W);
  		}
  
! 		if (maskbits & PT_H)
! 			*pte = (*pte & ~L2_MASK) | L2_INVAL;
  
! 		if (pmap_is_curpmap(pv->pv_pmap))
  			/* 
! 			 * if we had cacheable pte's we'd clean the
! 			 * pte out to memory here
! 			 *
! 			 * flush tlb entry as it's in the current pmap
  			 */
  			cpu_tlb_flushID_SE(pv->pv_va); 
  	}
  	cpu_cpwait();
  
--- 3438,3489 ----
  				 *
  				 */
  				if (maskbits & PT_Wr) {
! 					/*
! 					 * Clear the NC bit in the pv
! 					 * entry; we'll update the PTE
! 					 * below.
! 					 */
  					pv->pv_flags &= ~PT_NC;
  				}
! 			} else if (pmap_is_curpmap(pv->pv_pmap)) {
! 				/*
  				 * Entry is cacheable: check if pmap is
! 				 * current, and if it is, flush it,
! 				 * otherwise it won't be in the cache.
  				 */
  				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
+ 			}
  
! 			/* Make the PTE read-only. */
! 			*pte = (*pte & PG_FRAME) |
! 			    pmap_pte_proto(pv->pv_pmap, VM_PROT_READ,
! 					   (pv->pv_flags & PT_NC) ?
! 					   PTE_PROTO_NOCACHE :
! 					   PTE_PROTO_CACHE);
  		}
  
! 		if (maskbits & PT_H) {
! 			/*
! 			 * We are going to revoke the mapping for this
! 			 * page.  If it is writable, make sure to flush
! 			 * it from the cache.
! 			 *
! 			 * XXXJRT This flush might be redundant!
! 			 */
! 			if ((pv->pv_flags & PT_Wr) != 0 &&
! 			    pmap_is_curpmap(pv->pv_pmap))
! 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
  
! 			*pte = *pte & PG_FRAME;
! 		}
! 
! 		if (pmap_is_curpmap(pv->pv_pmap)) {
  			/* 
! 			 * The PTE has been modifed, and it's in the
! 			 * current pmap, invalidate the TLB entry.
  			 */
  			cpu_tlb_flushID_SE(pv->pv_va); 
+ 		}
  	}
  	cpu_cpwait();
  
***************
*** 3499,3504 ****
--- 3582,3588 ----
  		return(0);
  
  	/* This can happen if user code tries to access kernel memory. */
+ 	/* XXXJRT Use address-based check.  C.f. Alpha pmap. */
  	if ((*pte & PT_AP(AP_W)) != 0)
  		return (0);
  
***************
*** 3540,3546 ****
  	 * already set the cacheable bits based on the assumption that we
  	 * can write to this page.
  	 */
! 	*pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
  	PDEBUG(0, printf("->(%08x)\n", *pte));
  
  	simple_unlock(&pvh->pvh_lock);
--- 3624,3633 ----
  	 * already set the cacheable bits based on the assumption that we
  	 * can write to this page.
  	 */
! 	*pte = (*pte & PG_FRAME) |
! 	    pmap_pte_proto(pmap, VM_PROT_READ|VM_PROT_WRITE,
! 			   (flags & PT_NC) ? PTE_PROTO_NOCACHE
! 					   : PTE_PROTO_CACHE);
  	PDEBUG(0, printf("->(%08x)\n", *pte));
  
  	simple_unlock(&pvh->pvh_lock);
***************
*** 3720,3723 ****
  	return (ptp);
  }
  
! /* End of pmap.c */
--- 3807,4008 ----
  	return (ptp);
  }
  
! /*
!  * pmap_pte_protos_init:
!  *
!  *	Initialize the prototype PTE arrays.  This is done very
!  *	early, right after the cpufunc vector is selected.
!  */
! #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
!     defined(CPU_ARM8) || defined(CPU_SA110)
! void
! pmap_pte_protos_init_arm678(void)
! {
! 	int prot;
! 
! #define	CACHE	(PT_B|PT_C)
! 
! 	for (prot = 0; prot < 8; prot++) {
! 		if (prot == 0) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 		} else if (prot & VM_PROT_WRITE) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW) | CACHE;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW);
! 		} else {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR) | CACHE;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR);
! 		}
! 	}
! #undef CACHE
! }
! #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
! 
! #if defined(CPU_ARM9)
! void
! pmap_pte_protos_init_arm9(void)
! {
! 	int prot;
! 
! /* Use the cache in write-through mode for now. */
! #define	CACHE	(PT_C)
! 
! 	for (prot = 0; prot < 8; prot++) {
! 		if (prot == 0) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 		} else if (prot & VM_PROT_WRITE) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW) | CACHE;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW);
! 		} else {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR) | CACHE;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR);
! 		}
! 	}
! #undef CACHE
! }
! #endif /* CPU_ARM9 */
! 
! #if defined(CPU_XSCALE)
! void
! pmap_pte_protos_init_xscale(void)
! {
! 	int prot;
! 
! /*
!  * i80200 errata item #40: Store to cacheable memory,
!  * interrupted by an exception, may inadvertently
!  * write to memory.
!  *
!  * This can have an adverse effect on copy-on-write
!  * operation.
!  *
!  * Work-around: Non-writable mappings should have
!  * a cache mode of write-through (this avoids the
!  * problem).  This has no adverse performance effect,
!  * since the mappings are read-only.
!  */
! #define	CACHE_WT	(PT_C)
! #define	CACHE_WB	(PT_C)		/* XXX for now */
! 
! 	for (prot = 0; prot < 8; prot++) {
! 		if (prot == 0) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 		} else if (prot & VM_PROT_WRITE) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW) | CACHE_WB;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE_WB;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW);
! 		} else {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR) | CACHE_WT;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE_WT;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR);
! 		}
! 	}
! #undef CACHE_WT
! #undef CACHE_WB
! }
! #endif /* CPU_XSCALE */
Index: arm/include/arm32/pmap.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/pmap.h,v
retrieving revision 1.20
diff -c -r1.20 pmap.h
*** arm/include/arm32/pmap.h	2002/01/19 16:55:22	1.20
--- arm/include/arm32/pmap.h	2002/01/30 23:14:17
***************
*** 138,147 ****
  } pv_addr_t;
  
  /*
!  * _KERNEL specific macros, functions and prototypes
   */
  
! #ifdef  _KERNEL
  
  /*
   * Commonly referenced structures
--- 138,163 ----
  } pv_addr_t;
  
  /*
!  * Prototype PTE bits for each VM protection code, both cached
!  * and un-cached, kernel and userland.
   */
+ extern pt_entry_t pte_protos[4][8];
  
! #define	PTE_PROTO_KERNEL	0
! #define	PTE_PROTO_USER		1
! #define	PTE_PROTO_CACHE		0
! #define	PTE_PROTO_NOCACHE	2
! 
! #define	pte_proto(ku, prot, cache)					\
! 	pte_protos[(ku) + (cache)][(prot)]
! 
! #define	pmap_pte_proto(pm, prot, cache)					\
! 	pte_proto((pm == pmap_kernel()) ? PTE_PROTO_KERNEL		\
! 					: PTE_PROTO_USER, (prot), (cache))
! 
! void	pmap_pte_protos_init_arm678(void);
! void	pmap_pte_protos_init_arm9(void);
! void	pmap_pte_protos_init_xscale(void);
  
  /*
   * Commonly referenced structures
***************
*** 181,188 ****
   */
  boolean_t	pmap_pageidlezero __P((paddr_t));
  #define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
- 
- #endif	/* _KERNEL */
  
  /*
   * Useful macros and constants 
--- 197,202 ----
Index: arm/include/arm32/pte.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/pte.h,v
retrieving revision 1.1
diff -c -r1.1 pte.h
*** arm/include/arm32/pte.h	2001/11/23 17:39:04	1.1
--- arm/include/arm32/pte.h	2002/01/30 23:14:18
***************
*** 74,85 ****
  #define PT_C		0x08	/* Phys - Cacheable */
  #define PT_U		0x10	/* Phys - Updateable */
  
- #ifndef _LOCORE
- extern pt_entry_t	pte_cache_mode;
- 
- #define PT_CACHEABLE	(pte_cache_mode)
- #endif
- 
  /* Page R/M attributes (in pmseg.attrs). */
  #define PT_M		0x01	/* Virt - Modified */
  #define PT_H		0x02	/* Virt - Handled (Used) */
--- 74,79 ----
***************
*** 106,115 ****
  
  /* PTE construction macros */
  #define	L2_LPTE(p, a, f)	((p) | PT_AP(a) | L2_LPAGE | (f))
- #define L2_SPTE(p, a, f)	((p) | PT_AP(a) | L2_SPAGE | (f))
- #define L2_PTE(p, a)		L2_SPTE((p), (a), PT_CACHEABLE)
- #define L2_PTE_NC(p, a)		L2_SPTE((p), (a), PT_B)
- #define L2_PTE_NC_NB(p, a)	L2_SPTE((p), (a), 0)
  #define L1_SECPTE(p, a, f)	((p) | ((a) << AP_SECTION_SHIFT) | (f) \
  				| L1_SECTION | PT_U)
  
--- 100,105 ----
Index: arm/mainbus/mainbus_io.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/mainbus/mainbus_io.c,v
retrieving revision 1.6
diff -c -r1.6 mainbus_io.c
*** arm/mainbus/mainbus_io.c	2001/11/23 17:23:42	1.6
--- arm/mainbus/mainbus_io.c	2002/01/30 23:14:19
***************
*** 163,173 ****
  
  	for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
  		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
! 		pte = pmap_pte(pmap_kernel(), va);
! 		if (cacheable)
! 			*pte |= PT_CACHEABLE;
! 		else
! 			*pte &= ~PT_CACHEABLE;
  	}
  	pmap_update(pmap_kernel());
  
--- 163,175 ----
  
  	for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
  		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
! 		if (cacheable == 0) {
! 			pte = pmap_pte(pmap_kernel(), va);
! 			*pte = (*pte & PG_FRAME) |
! 			    pmap_pte_proto(pmap_kernel(),
! 					   VM_PROT_READ|VM_PROT_WRITE,
! 					   PTE_PROTO_NOCACHE);
! 		}
  	}
  	pmap_update(pmap_kernel());
  
Index: evbarm/iq80310/iq80310_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/evbarm/iq80310/iq80310_machdep.c,v
retrieving revision 1.18
diff -c -r1.18 iq80310_machdep.c
*** evbarm/iq80310/iq80310_machdep.c	2002/01/30 04:01:36	1.18
--- evbarm/iq80310/iq80310_machdep.c	2002/01/30 23:14:24
***************
*** 609,629 ****
  		/*
  		 * This maps the kernel text/data/bss VA==PA.
  		 */
  		logical += map_chunk(l1pagetable, l2pagetable,
  		    KERNEL_BASE + logical,
  		    physical_start + logical, textsize,
! 		    AP_KRW, PT_CACHEABLE);
  		logical += map_chunk(l1pagetable, l2pagetable,
  		    KERNEL_BASE + logical,
  		    physical_start + logical, totalsize - textsize,
! 		    AP_KRW, PT_CACHEABLE);
  
  #if 0 /* XXX No symbols yet. */
  		logical += map_chunk(l1pagetable, l2pagetable,
  		    KERNEL_BASE + logical,
  		    physical_start + logical, kernexec->a_syms + sizeof(int)
  		    + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
! 		    AP_KRW, PT_CACHEABLE);
  #endif
  	}
  
--- 609,630 ----
  		/*
  		 * This maps the kernel text/data/bss VA==PA.
  		 */
+ 		/* XXXJRT Clean me up */
  		logical += map_chunk(l1pagetable, l2pagetable,
  		    KERNEL_BASE + logical,
  		    physical_start + logical, textsize,
! 		    AP_KRW, PT_C);
  		logical += map_chunk(l1pagetable, l2pagetable,
  		    KERNEL_BASE + logical,
  		    physical_start + logical, totalsize - textsize,
! 		    AP_KRW, PT_C);
  
  #if 0 /* XXX No symbols yet. */
  		logical += map_chunk(l1pagetable, l2pagetable,
  		    KERNEL_BASE + logical,
  		    physical_start + logical, kernexec->a_syms + sizeof(int)
  		    + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
! 		    AP_KRW, PT_C);
  #endif
  	}
  
***************
*** 632,651 ****
  #endif
  
  	/* Map the stack pages */
  	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
! 	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
  	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
! 	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
  	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
! 	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
  	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
! 	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
  	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
  	    PD_SIZE, AP_KRW, 0);
  
  	/* Map the Mini-Data cache clean area. */
  	map_chunk(0, l2pagetable, minidataclean.pv_va, minidataclean.pv_pa,
! 	    NBPG, AP_KRW, PT_CACHEABLE);
  
  	/* Map the page table that maps the kernel pages */
  	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
--- 633,657 ----
  #endif
  
  	/* Map the stack pages */
+ 	/* XXXJRT Clean me up. */
  	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
! 	    IRQ_STACK_SIZE * NBPG, AP_KRW,
! 	    PT_C);
  	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
! 	    ABT_STACK_SIZE * NBPG, AP_KRW,
! 	    PT_C);
  	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
! 	    UND_STACK_SIZE * NBPG, AP_KRW,
! 	    PT_C);
  	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
! 	    UPAGES * NBPG, AP_KRW,
! 	    PT_C);
  	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
  	    PD_SIZE, AP_KRW, 0);
  
  	/* Map the Mini-Data cache clean area. */
  	map_chunk(0, l2pagetable, minidataclean.pv_va, minidataclean.pv_pa,
! 	    NBPG, AP_KRW, PT_C);
  
  	/* Map the page table that maps the kernel pages */
  	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);

--NHfequSh1hmJPP0s--