Subject: Re: Using different cache modes for r/o vs r/w pages
To: None <Richard.Earnshaw@arm.com, port-arm@netbsd.org>
From: Jason R Thorpe <thorpej@wasabisystems.com>
List: port-arm
Date: 01/30/2002 18:27:29
--aKl9HDSa9q6Cj3fk
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline

On Wed, Jan 30, 2002 at 03:21:16PM -0800, Jason R Thorpe wrote:

 > I'll boot it after I get back from the gym.

Ok, with a few tweaks to pmap_handled_emulation(), it boots.

It's currently not as efficient as it could be.  I'm going to clean
up the mod/ref stuff in pmap_enter() to make it a bit faster, and
use __HAVE_VM_PAGE_MD to speed up mod/ref emulation a bit.  In
particular, things like the page's cacheable attribute belong in
flags in the pv_head (or, vm_page_md), not copied into every individual
pv_entry.

--
        -- Jason R. Thorpe <thorpej@wasabisystems.com>

--aKl9HDSa9q6Cj3fk
Content-Type: text/plain; charset=us-ascii
Content-Description: pmap-cache-take3
Content-Disposition: attachment; filename=foo

Index: arm/cpufunc.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm/cpufunc.c,v
retrieving revision 1.29
diff -c -r1.29 cpufunc.c
*** arm/cpufunc.c	2002/01/30 00:37:18	1.29
--- arm/cpufunc.c	2002/01/31 02:23:26
***************
*** 53,58 ****
--- 53,61 ----
  #include <sys/types.h>
  #include <sys/param.h>
  #include <sys/systm.h>
+ 
+ #include <uvm/uvm_extern.h>
+ 
  #include <machine/cpu.h>
  #include <machine/bootconfig.h>
  #include <arch/arm/arm/disassem.h>
***************
*** 650,655 ****
--- 653,659 ----
  		cpu_reset_needs_v4_MMU_disable = 0;
  		/* XXX Cache info? */
  		arm_dcache_align_mask = -1;
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	/* CPU_ARM6 */
***************
*** 661,666 ****
--- 665,671 ----
  		cpu_reset_needs_v4_MMU_disable = 0;
  		/* XXX Cache info? */
  		arm_dcache_align_mask = -1;
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	/* CPU_ARM7 */
***************
*** 671,676 ****
--- 676,682 ----
  		cpufuncs = arm7tdmi_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 0;
  		get_cachetype();
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	
***************
*** 680,694 ****
  		cpufuncs = arm8_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
  		get_cachetype();
  		return 0;
  	}
  #endif	/* CPU_ARM8 */
  #ifdef CPU_ARM9
  	if (cputype == CPU_ID_ARM920T) {
- 		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
  		cpufuncs = arm9_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
  		get_cachetype();
  		return 0;
  	}
  #endif /* CPU_ARM9 */
--- 686,701 ----
  		cpufuncs = arm8_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
  		get_cachetype();
+ 		pmap_pte_protos_init_arm678();
  		return 0;
  	}
  #endif	/* CPU_ARM8 */
  #ifdef CPU_ARM9
  	if (cputype == CPU_ID_ARM920T) {
  		cpufuncs = arm9_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
  		get_cachetype();
+ 		pmap_pte_protos_init_arm9();
  		return 0;
  	}
  #endif /* CPU_ARM9 */
***************
*** 698,703 ****
--- 705,711 ----
  		cpufuncs = sa110_cpufuncs;
  		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
  		get_cachetype();
+ 		pmap_pte_protos_init_arm678();		/* XXX */
  		/*
  		 * Enable the right variant of sleeping.
  		 */
***************
*** 743,749 ****
  			:
  			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
  
- 		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
  		cpufuncs = xscale_cpufuncs;
  
  		/*
--- 751,756 ----
***************
*** 758,763 ****
--- 765,771 ----
  
  		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
  		get_cachetype();
+ 		pmap_pte_protos_init_xscale();
  		return 0;
  	}
  #endif /* CPU_XSCALE */
Index: arm32/arm32_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/arm32_machdep.c,v
retrieving revision 1.11
diff -c -r1.11 arm32_machdep.c
*** arm32/arm32_machdep.c	2002/01/20 03:41:47	1.11
--- arm32/arm32_machdep.c	2002/01/31 02:23:26
***************
*** 168,174 ****
  
  	if (cacheable)
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
! 		    L1_SEC((pa & PD_MASK), pte_cache_mode);
  	else
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
  		    L1_SEC((pa & PD_MASK), 0);
--- 168,174 ----
  
  	if (cacheable)
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
! 		    L1_SEC((pa & PD_MASK), (PT_B|PT_C)); /* XXXJRT */
  	else
  		((u_int *)pagetable)[(va >> PDSHIFT)] =
  		    L1_SEC((pa & PD_MASK), 0);
***************
*** 255,264 ****
  #ifdef VERBOSE_INIT_ARM
  			printf("P");
  #endif
  #ifndef cats			
! 			l2pt[((va >> PGSHIFT) & 0x3ff)] = L2_SPTE(pa, acc, flg);
  #else
! 			l2pt[((va >> PGSHIFT) & 0x7ff)] = L2_SPTE(pa, acc, flg);
  #endif
  			va += NBPG;
  			pa += NBPG;
--- 255,267 ----
  #ifdef VERBOSE_INIT_ARM
  			printf("P");
  #endif
+ 			/* XXXJRT Clean me up */
  #ifndef cats			
! 			l2pt[((va >> PGSHIFT) & 0x3ff)] =
! 			    pa | PT_AP(acc) | flg | L2_SPAGE;
  #else
! 			l2pt[((va >> PGSHIFT) & 0x7ff)] =
! 			    pa | PT_AP(acc) | flg | L2_SPAGE;
  #endif
  			va += NBPG;
  			pa += NBPG;
***************
*** 280,289 ****
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KRW);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KRW);
  #endif	
  }
  
--- 283,296 ----
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_CACHE);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_CACHE);
  #endif	
  }
  
***************
*** 296,305 ****
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
  #endif
  }
  
--- 303,316 ----
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_NOCACHE);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ|VM_PROT_WRITE,
! 					PTE_PROTO_NOCACHE);
  #endif
  }
  
***************
*** 312,321 ****
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KR);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    L2_PTE((pa & PG_FRAME), AP_KR);
  #endif
  }
  
--- 323,336 ----
  {
  #ifndef cats
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ,
! 					PTE_PROTO_CACHE);
  #else
  	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
! 	    (pa & PG_FRAME) | pte_proto(PTE_PROTO_KERNEL,
! 					VM_PROT_READ,
! 					PTE_PROTO_CACHE);
  #endif
  }
  
***************
*** 486,493 ****
  void
  zero_page_readonly()
  {
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    L2_PTE((systempage.pv_pa & PG_FRAME), AP_KR));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
--- 501,512 ----
  void
  zero_page_readonly()
  {
+ 
+ 	/* XXXJRT Do we really care about caching page0?! */
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    systempage.pv_pa | pte_proto(PTE_PROTO_KERNEL,
! 					 VM_PROT_READ,
! 					 PTE_PROTO_CACHE));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
***************
*** 502,509 ****
  void
  zero_page_readwrite()
  {
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    L2_PTE((systempage.pv_pa & PG_FRAME), AP_KRW));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
--- 521,532 ----
  void
  zero_page_readwrite()
  {
+ 
+ 	/* XXXJRT See above. */
  	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
! 	    systempage.pv_pa | pte_proto(PTE_PROTO_KERNEL,
! 					 VM_PROT_READ|VM_PROT_WRITE,
! 					 PTE_PROTO_CACHE));
  	cpu_tlb_flushID_SE(0x00000000);
  }
  
Index: arm32/bus_dma.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/bus_dma.c,v
retrieving revision 1.8
diff -c -r1.8 bus_dma.c
*** arm32/bus_dma.c	2002/01/25 20:57:41	1.8
--- arm32/bus_dma.c	2002/01/31 02:23:27
***************
*** 547,553 ****
  				cpu_dcache_wbinv_range(va, NBPG);
  				cpu_drain_writebuf();
  				ptep = vtopte(va);
! 				*ptep = ((*ptep) & (~PT_C | PT_B));
  				tlb_flush();
  			}
  #ifdef DEBUG_DMA
--- 547,556 ----
  				cpu_dcache_wbinv_range(va, NBPG);
  				cpu_drain_writebuf();
  				ptep = vtopte(va);
! 				*ptep = (*ptep & PG_FRAME) |
! 				    pmap_pte_proto(pmap_kernel(),
! 						   VM_PROT_READ|VM_PROT_WRITE,
! 						   PTE_PROTO_NOCACHE);
  				tlb_flush();
  			}
  #ifdef DEBUG_DMA
Index: arm32/pmap.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.36
diff -c -r1.36 pmap.c
*** arm32/pmap.c	2002/01/25 19:19:25	1.36
--- arm32/pmap.c	2002/01/31 02:23:30
***************
*** 193,198 ****
--- 193,199 ----
  extern caddr_t msgbufaddr;
  
  boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
+ 
  /*
   * locking data structures
   */
***************
*** 326,336 ****
      pt_entry_t *, boolean_t));
  
  /*
!  * Cache enable bits in PTE to use on pages that are cacheable.
!  * On most machines this is cacheable/bufferable, but on some, eg arm10, we
!  * can chose between write-through and write-back cacheing.
   */
! pt_entry_t pte_cache_mode = (PT_C | PT_B);
  
  /*
   * real definition of pv_entry.
--- 327,335 ----
      pt_entry_t *, boolean_t));
  
  /*
!  * Prototype PTE array.  These are initialized in pmap_pte_protos_init_*().
   */
! pt_entry_t pte_protos[4][8];
  
  /*
   * real definition of pv_entry.
***************
*** 943,954 ****
  	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
  
  	PDEBUG(0, printf("pt self reference %lx in %lx\n",
! 	    L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
  
  	/* Map the page table into the page table area. */
  	if (selfref) {
  		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
! 			L2_PTE_NC_NB(l2pa, AP_KRW);
  	}
  	/* XXX should be a purge */
  /*	cpu_tlb_flushD();*/
--- 942,958 ----
  	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
  
  	PDEBUG(0, printf("pt self reference %lx in %lx\n",
! 	    l2pa | pmap_pte_proto(pmap_kernel(),
! 				  VM_PROT_READ|VM_PROT_WRITE,
! 				  PTE_PROTO_NOCACHE),
! 			 pmap->pm_vptpt));
  
  	/* Map the page table into the page table area. */
  	if (selfref) {
  		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
! 		    l2pa | pmap_pte_proto(pmap_kernel(),
! 					  VM_PROT_READ|VM_PROT_WRITE,
! 					  PTE_PROTO_NOCACHE);
  	}
  	/* XXX should be a purge */
  /*	cpu_tlb_flushD();*/
***************
*** 1392,1398 ****
  
  		/* Revoke cacheability and bufferability */
  		/* XXX should be done better than this */
! 		ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
  
  		va += NBPG;
  		m = m->pageq.tqe_next;
--- 1396,1406 ----
  
  		/* Revoke cacheability and bufferability */
  		/* XXX should be done better than this */
! 		ptes[arm_byte_to_page(va)] =
! 		    (ptes[arm_byte_to_page(va)] & PG_FRAME) |
! 		    pmap_pte_proto(pmap_kernel(),
! 				   VM_PROT_READ|VM_PROT_WRITE,
! 				   PTE_PROTO_NOCACHE);
  
  		va += NBPG;
  		m = m->pageq.tqe_next;
***************
*** 1506,1512 ****
  	/* Revoke cacheability and bufferability */
  	/* XXX should be done better than this */
  	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
! 	*pte = *pte & ~(PT_C | PT_B);
  
  	/* Wire in this page table */
  	pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
--- 1514,1522 ----
  	/* Revoke cacheability and bufferability */
  	/* XXX should be done better than this */
  	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
! 	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap_kernel(),
! 						  VM_PROT_READ|VM_PROT_WRITE,
! 						  PTE_PROTO_NOCACHE);
  
  	/* Wire in this page table */
  	pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
***************
*** 1878,1887 ****
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_cpwait();
  	bzero_page(page_hook0.va);
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  }
  
--- 1888,1904 ----
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	KDASSERT((phys & PG_FRAME) == phys);
! 	*page_hook0.pte = phys |
! 	    pmap_pte_proto(pmap_kernel(),
! 			   VM_PROT_READ|VM_PROT_WRITE,
! 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook0.va);
+ 
  	cpu_cpwait();
+ 
  	bzero_page(page_hook0.va);
+ 
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  }
  
***************
*** 1910,1916 ****
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_cpwait();
  
--- 1927,1937 ----
  	 * Hook in the page, zero it, and purge the cache for that
  	 * zeroed page. Invalidate the TLB as needed.
  	 */
! 	KDASSERT((phys & PG_FRAME) == phys);
! 	*page_hook0.pte = phys |
! 	    pmap_pte_proto(pmap_kernel(),
! 			   VM_PROT_READ|VM_PROT_WRITE,
! 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_cpwait();
  
***************
*** 1971,1982 ****
  	 * the cache for the appropriate page. Invalidate the TLB
  	 * as required.
  	 */
! 	*page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
! 	*page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
  	cpu_tlb_flushD_SE(page_hook0.va);
  	cpu_tlb_flushD_SE(page_hook1.va);
  	cpu_cpwait();
  	bcopy_page(page_hook0.va, page_hook1.va);
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  	cpu_dcache_wbinv_range(page_hook1.va, NBPG);
  }
--- 1992,2015 ----
  	 * the cache for the appropriate page. Invalidate the TLB
  	 * as required.
  	 */
! 	KDASSERT((src & PG_FRAME) == src);
! 	*page_hook0.pte = src |		/* XXX should be r/o */
! 	    pmap_pte_proto(pmap_kernel(),
! 			   VM_PROT_READ|VM_PROT_WRITE,
! 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook0.va);
+ 
+ 	KDASSERT((dest & PG_FRAME) == dest);
+ 	*page_hook1.pte = dest |
+ 	    pmap_pte_proto(pmap_kernel(),
+ 			   VM_PROT_READ|VM_PROT_WRITE,
+ 			   PTE_PROTO_CACHE);
  	cpu_tlb_flushD_SE(page_hook1.va);
+ 
  	cpu_cpwait();
+ 
  	bcopy_page(page_hook0.va, page_hook1.va);
+ 
  	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
  	cpu_dcache_wbinv_range(page_hook1.va, NBPG);
  }
***************
*** 2194,2199 ****
--- 2227,2233 ----
  	int cacheable_entries = 0;
  	int kern_cacheable = 0;
  	int other_writable = 0;
+ 	int prot;
  
  	pv = pvh->pvh_list;
  	KASSERT(ptes != NULL);
***************
*** 2237,2248 ****
  		if (cacheable_entries == 0)
  		    return;
  		for (npv = pv; npv; npv = npv->pv_next) {
! 			if ((pmap == npv->pv_pmap 
! 			    || kpmap == npv->pv_pmap) && 
  			    (npv->pv_flags & PT_NC) == 0) {
! 				ptes[arm_byte_to_page(npv->pv_va)] &= 
! 				    ~(PT_C | PT_B);
!  				npv->pv_flags |= PT_NC;
  				/*
  				 * If this page needs flushing from the
  				 * cache, and we aren't going to do it
--- 2271,2282 ----
  		if (cacheable_entries == 0)
  		    return;
  		for (npv = pv; npv; npv = npv->pv_next) {
! 			if ((pmap == npv->pv_pmap ||
! 			     kpmap == npv->pv_pmap) && 
  			    (npv->pv_flags & PT_NC) == 0) {
! 				prot = (npv->pv_flags & PT_Wr) ?
! 				    VM_PROT_READ | VM_PROT_WRITE :
! 				    VM_PROT_READ;
  				/*
  				 * If this page needs flushing from the
  				 * cache, and we aren't going to do it
***************
*** 2256,2261 ****
--- 2290,2300 ----
  					    NBPG);
  					cpu_tlb_flushID_SE(npv->pv_va);
  				}
+ 				ptes[arm_byte_to_page(npv->pv_va)] =
+ 				    (ptes[arm_byte_to_page(npv->pv_va)] & PG_FRAME) |
+ 				    pmap_pte_proto(npv->pv_pmap, prot,
+ 						   PTE_PROTO_NOCACHE);
+  				npv->pv_flags |= PT_NC;
  			}
  		}
  		if ((clear_cache && cacheable_entries >= 4) ||
***************
*** 2273,2280 ****
  			if ((pmap == npv->pv_pmap ||
  			    (kpmap == npv->pv_pmap && other_writable == 0)) && 
  			    (npv->pv_flags & PT_NC)) {
! 				ptes[arm_byte_to_page(npv->pv_va)] |=
! 				    pte_cache_mode;
  				npv->pv_flags &= ~PT_NC;
  			}
  		}
--- 2312,2324 ----
  			if ((pmap == npv->pv_pmap ||
  			    (kpmap == npv->pv_pmap && other_writable == 0)) && 
  			    (npv->pv_flags & PT_NC)) {
! 				prot = (npv->pv_flags & PT_Wr) ?
! 				    VM_PROT_READ | VM_PROT_WRITE :
! 				    VM_PROT_READ;
! 				ptes[arm_byte_to_page(npv->pv_va)] =
! 				    ptes[arm_byte_to_page(npv->pv_va)] |
! 				    pmap_pte_proto(npv->pv_pmap, prot,
! 						   PTE_PROTO_CACHE);
  				npv->pv_flags &= ~PT_NC;
  			}
  		}
***************
*** 2818,2850 ****
  #endif
  
  	/* Construct the pte, giving the correct access. */
! 	npte = (pa & PG_FRAME);
  
! 	/* VA 0 is magic. */
! 	if (pmap != pmap_kernel() && va != 0)
! 		npte |= PT_AP(AP_U);
! 
  	if (pmap_initialized && bank != -1) {
  #ifdef DIAGNOSTIC
  		if ((flags & VM_PROT_ALL) & ~prot)
  			panic("pmap_enter: access_type exceeds prot");
  #endif
! 		npte |= pte_cache_mode;
  		if (flags & VM_PROT_WRITE) {
! 			npte |= L2_SPAGE | PT_AP(AP_W);
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
  		} else if (flags & VM_PROT_ALL) {
! 			npte |= L2_SPAGE;
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H;
! 		} else
! 			npte |= L2_INVAL;
  	} else {
! 		if (prot & VM_PROT_WRITE)
! 			npte |= L2_SPAGE | PT_AP(AP_W);
! 		else if (prot & VM_PROT_ALL)
! 			npte |= L2_SPAGE;
! 		else
! 			npte |= L2_INVAL;
  	}
  
  #ifdef MYCROFT_HACK
--- 2862,2907 ----
  #endif
  
  	/* Construct the pte, giving the correct access. */
! 	KDASSERT((pa & PG_FRAME) == pa);
! 	npte = pa;
  
! 	/*
! 	 * VA 0 is magic; that's where the vector page is.  User pmaps
! 	 * always need to see an un-cached view of this page (which they
! 	 * would anyway, since it's not in the managed page pool, so there
! 	 * is no need to check for it).
! 	 */
  	if (pmap_initialized && bank != -1) {
+ 		KDASSERT(va != 0);
  #ifdef DIAGNOSTIC
  		if ((flags & VM_PROT_ALL) & ~prot)
  			panic("pmap_enter: access_type exceeds prot");
  #endif
! 		/*
! 		 * XXXJRT -- consider optimization potential.
! 		 * C.f. Alpha pmap.
! 		 */
  		if (flags & VM_PROT_WRITE) {
! 			npte |= pmap_pte_proto(pmap,
! 					       VM_PROT_READ|VM_PROT_WRITE,
! 					       PTE_PROTO_CACHE);
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
  		} else if (flags & VM_PROT_ALL) {
! 			npte |= pmap_pte_proto(pmap,
! 					       VM_PROT_READ,
! 					       PTE_PROTO_CACHE);
  			vm_physmem[bank].pmseg.attrs[off] |= PT_H;
! 		}
! 		/*
! 		 * ...else we want to take a fault, so don't do anything
! 		 * to the PTE here.
! 		 */
  	} else {
! 		/*
! 		 * Non-managed pages entered via this interface
! 		 * are implicitly un-cached.
! 		 */
! 		npte |= pmap_pte_proto(pmap, prot, PTE_PROTO_NOCACHE);
  	}
  
  #ifdef MYCROFT_HACK
***************
*** 2920,2926 ****
  	}
  	pte = vtopte(va);
  	KASSERT(!pmap_pte_v(pte));
! 	*pte = L2_PTE(pa, AP_KRW);
  }
  
  void
--- 2977,2990 ----
  	}
  	pte = vtopte(va);
  	KASSERT(!pmap_pte_v(pte));
! #if 1 /* XXX */
! 	*pte = pa | pmap_pte_proto(pmap_kernel(),
! 				   VM_PROT_READ|VM_PROT_WRITE,
! 				   PTE_PROTO_CACHE);
! #else
! 	*pte = pa | pmap_pte_proto(pmap_kernel(), prot,
! 				   PTE_PROTO_CACHE);
! #endif
  }
  
  void
***************
*** 3357,3363 ****
  		pte = pmap_pte(pv->pv_pmap, va);
  		KASSERT(pte != NULL);
  		if (maskbits & (PT_Wr|PT_M)) {
! 			if ((pv->pv_flags & PT_NC)) {
  				/* 
  				 * Entry is not cacheable: reenable
  				 * the cache, nothing to flush
--- 3421,3427 ----
  		pte = pmap_pte(pv->pv_pmap, va);
  		KASSERT(pte != NULL);
  		if (maskbits & (PT_Wr|PT_M)) {
! 			if (pv->pv_flags & PT_NC) {
  				/* 
  				 * Entry is not cacheable: reenable
  				 * the cache, nothing to flush
***************
*** 3375,3406 ****
  				 *
  				 */
  				if (maskbits & PT_Wr) {
! 					*pte |= pte_cache_mode;
  					pv->pv_flags &= ~PT_NC;
  				}
! 			} else if (pmap_is_curpmap(pv->pv_pmap))
! 				/* 
  				 * Entry is cacheable: check if pmap is
! 				 * current if it is flush it,
! 				 * otherwise it won't be in the cache
  				 */
  				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
  
! 			/* make the pte read only */
! 			*pte &= ~PT_AP(AP_W);
  		}
  
! 		if (maskbits & PT_H)
! 			*pte = (*pte & ~L2_MASK) | L2_INVAL;
  
! 		if (pmap_is_curpmap(pv->pv_pmap))
  			/* 
! 			 * if we had cacheable pte's we'd clean the
! 			 * pte out to memory here
! 			 *
! 			 * flush tlb entry as it's in the current pmap
  			 */
  			cpu_tlb_flushID_SE(pv->pv_va); 
  	}
  	cpu_cpwait();
  
--- 3439,3490 ----
  				 *
  				 */
  				if (maskbits & PT_Wr) {
! 					/*
! 					 * Clear the NC bit in the pv
! 					 * entry; we'll update the PTE
! 					 * below.
! 					 */
  					pv->pv_flags &= ~PT_NC;
  				}
! 			} else if (pmap_is_curpmap(pv->pv_pmap)) {
! 				/*
  				 * Entry is cacheable: check if pmap is
! 				 * current, and if it is, flush it,
! 				 * otherwise it won't be in the cache.
  				 */
  				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
+ 			}
  
! 			/* Make the PTE read-only. */
! 			*pte = (*pte & PG_FRAME) |
! 			    pmap_pte_proto(pv->pv_pmap, VM_PROT_READ,
! 					   (pv->pv_flags & PT_NC) ?
! 					   PTE_PROTO_NOCACHE :
! 					   PTE_PROTO_CACHE);
  		}
+ 
+ 		if (maskbits & PT_H) {
+ 			/*
+ 			 * We are going to revoke the mapping for this
+ 			 * page.  If it is writable, make sure to flush
+ 			 * it from the cache.
+ 			 *
+ 			 * XXXJRT This flush might be redundant!
+ 			 */
+ 			if ((pv->pv_flags & PT_Wr) != 0 &&
+ 			    pmap_is_curpmap(pv->pv_pmap))
+ 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
  
! 			*pte = *pte & PG_FRAME;
! 		}
  
! 		if (pmap_is_curpmap(pv->pv_pmap)) {
  			/* 
! 			 * The PTE has been modifed, and it's in the
! 			 * current pmap, invalidate the TLB entry.
  			 */
  			cpu_tlb_flushID_SE(pv->pv_va); 
+ 		}
  	}
  	cpu_cpwait();
  
***************
*** 3499,3504 ****
--- 3583,3589 ----
  		return(0);
  
  	/* This can happen if user code tries to access kernel memory. */
+ 	/* XXXJRT Use address-based check.  C.f. Alpha pmap. */
  	if ((*pte & PT_AP(AP_W)) != 0)
  		return (0);
  
***************
*** 3540,3546 ****
  	 * already set the cacheable bits based on the assumption that we
  	 * can write to this page.
  	 */
! 	*pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
  	PDEBUG(0, printf("->(%08x)\n", *pte));
  
  	simple_unlock(&pvh->pvh_lock);
--- 3625,3634 ----
  	 * already set the cacheable bits based on the assumption that we
  	 * can write to this page.
  	 */
! 	*pte = (*pte & PG_FRAME) |
! 	    pmap_pte_proto(pmap, VM_PROT_READ|VM_PROT_WRITE,
! 			   (flags & PT_NC) ? PTE_PROTO_NOCACHE
! 					   : PTE_PROTO_CACHE);
  	PDEBUG(0, printf("->(%08x)\n", *pte));
  
  	simple_unlock(&pvh->pvh_lock);
***************
*** 3558,3565 ****
  	vaddr_t va;
  {
  	pt_entry_t *pte;
  	paddr_t pa;
! 	int bank, off;
  
  	PDEBUG(2, printf("pmap_handled_emulation\n"));
  
--- 3646,3654 ----
  	vaddr_t va;
  {
  	pt_entry_t *pte;
+ 	struct pv_head *pvh;
  	paddr_t pa;
! 	int bank, off, flags;
  
  	PDEBUG(2, printf("pmap_handled_emulation\n"));
  
***************
*** 3585,3599 ****
  	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
  		return(0);
  
  	/*
! 	 * Ok we just enable the pte and mark the attibs as handled
  	 */
  	PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
  	    va, pte, *pte));
  	vm_physmem[bank].pmseg.attrs[off] |= PT_H;
! 	*pte = (*pte & ~L2_MASK) | L2_SPAGE;
  	PDEBUG(0, printf("->(%08x)\n", *pte));
  
  	/* Return, indicating the problem has been dealt with */
  	cpu_tlb_flushID_SE(va);
  	cpu_cpwait();
--- 3674,3708 ----
  	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
  		return(0);
  
+ 	PMAP_HEAD_TO_MAP_LOCK();
+ 	/* Get the current flags for this page. */
+ 	pvh = &vm_physmem[bank].pmseg.pvhead[off];
+ 	/* XXX: needed if we hold head->map lock? */
+ 	simple_lock(&pvh->pvh_lock);
+ 
+ 	/*
+ 	 * XXXJRT Get the cacheable/non-cacheable state for this
+ 	 * XXXJRT mapping.  This should die, in favor of stuffing
+ 	 * XXXJRT these bits into the vm_page.
+ 	 */
+ 	flags = pmap_modify_pv(pmap, va, pvh, 0, 0);
+ 
  	/*
! 	 * Ok we just enable the pte and mark the attribs as handled
  	 */
  	PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
  	    va, pte, *pte));
  	vm_physmem[bank].pmseg.attrs[off] |= PT_H;
! 	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap,
! 						  VM_PROT_READ,
! 						  (flags & PT_NC) ?
! 						  PTE_PROTO_NOCACHE :
! 						  PTE_PROTO_CACHE);
  	PDEBUG(0, printf("->(%08x)\n", *pte));
  
+ 	simple_unlock(&pvh->pvh_lock);
+ 	PMAP_HEAD_TO_MAP_UNLOCK();
+ 
  	/* Return, indicating the problem has been dealt with */
  	cpu_tlb_flushID_SE(va);
  	cpu_cpwait();
***************
*** 3719,3723 ****
  //	pmap->pm_ptphint = ptp;
  	return (ptp);
  }
  
! /* End of pmap.c */
--- 3828,4030 ----
  //	pmap->pm_ptphint = ptp;
  	return (ptp);
  }
+ 
+ /*
+  * pmap_pte_protos_init:
+  *
+  *	Initialize the prototype PTE arrays.  This is done very
+  *	early, right after the cpufunc vector is selected.
+  */
+ #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
+     defined(CPU_ARM8) || defined(CPU_SA110)
+ void
+ pmap_pte_protos_init_arm678(void)
+ {
+ 	int prot;
+ 
+ #define	CACHE	(PT_B|PT_C)
+ 
+ 	for (prot = 0; prot < 8; prot++) {
+ 		if (prot == 0) {
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_CACHE) = 0;
+ 
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_NOCACHE) = 0;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_CACHE) = 0;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_NOCACHE) = 0;
+ 		} else if (prot & VM_PROT_WRITE) {
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRW) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRW);
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWURW);
+ 		} else {
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KR) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KR);
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWUR);
+ 		}
+ 	}
+ #undef CACHE
+ }
+ #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
+ 
+ #if defined(CPU_ARM9)
+ void
+ pmap_pte_protos_init_arm9(void)
+ {
+ 	int prot;
+ 
+ /* Use the cache in write-through mode for now. */
+ #define	CACHE	(PT_C)
+ 
+ 	for (prot = 0; prot < 8; prot++) {
+ 		if (prot == 0) {
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_CACHE) = 0;
+ 
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_NOCACHE) = 0;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_CACHE) = 0;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_NOCACHE) = 0;
+ 		} else if (prot & VM_PROT_WRITE) {
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRW) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRW);
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWURW);
+ 		} else {
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KR) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_KERNEL, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KR);
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_CACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE;
+ 
+ 			pte_proto(PTE_PROTO_USER, prot,
+ 				  PTE_PROTO_NOCACHE) =
+ 			    L2_SPAGE | PT_AP(AP_KRWUR);
+ 		}
+ 	}
+ #undef CACHE
+ }
+ #endif /* CPU_ARM9 */
  
! #if defined(CPU_XSCALE)
! void
! pmap_pte_protos_init_xscale(void)
! {
! 	int prot;
! 
! /*
!  * i80200 errata item #40: Store to cacheable memory,
!  * interrupted by an exception, may inadvertently
!  * write to memory.
!  *
!  * This can have an adverse effect on copy-on-write
!  * operation.
!  *
!  * Work-around: Non-writable mappings should have
!  * a cache mode of write-through (this avoids the
!  * problem).  This has no adverse performance effect,
!  * since the mappings are read-only.
!  */
! #define	CACHE_WT	(PT_C)
! #define	CACHE_WB	(PT_C)		/* XXX for now */
! 
! 	for (prot = 0; prot < 8; prot++) {
! 		if (prot == 0) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) = 0;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) = 0;
! 		} else if (prot & VM_PROT_WRITE) {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW) | CACHE_WB;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRW);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE_WB;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWURW);
! 		} else {
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR) | CACHE_WT;
! 
! 			pte_proto(PTE_PROTO_KERNEL, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KR);
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_CACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE_WT;
! 
! 			pte_proto(PTE_PROTO_USER, prot,
! 				  PTE_PROTO_NOCACHE) =
! 			    L2_SPAGE | PT_AP(AP_KRWUR);
! 		}
! 	}
! #undef CACHE_WT
! #undef CACHE_WB
! }
! #endif /* CPU_XSCALE */
Index: footbridge/footbridge_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/footbridge/footbridge_machdep.c,v
retrieving revision 1.5
diff -c -r1.5 footbridge_machdep.c
*** footbridge/footbridge_machdep.c	2002/01/05 22:41:48	1.5
--- footbridge/footbridge_machdep.c	2002/01/31 02:23:30
***************
*** 77,83 ****
  
  	for (loop = 0; loop < cleanarea; loop += NBPG) {
  		pte = pmap_pte(pmap_kernel(), (addr + loop));
! 		*pte = L2_PTE(DC21285_SA_CACHE_FLUSH_BASE + loop, AP_KR);
  	}
  	sa110_cache_clean_addr = addr;
  	sa110_cache_clean_size = cleanarea / 2;
--- 77,84 ----
  
  	for (loop = 0; loop < cleanarea; loop += NBPG) {
  		pte = pmap_pte(pmap_kernel(), (addr + loop));
! 		*pte = (DC21285_SA_CACHE_FLUSH_BASE + loop) |
! 		    pte_proto(PTE_PROTO_KERNEL, VM_PROT_READ, PTE_PROTO_CACHE);
  	}
  	sa110_cache_clean_addr = addr;
  	sa110_cache_clean_size = cleanarea / 2;
Index: include/arm32/pmap.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/pmap.h,v
retrieving revision 1.20
diff -c -r1.20 pmap.h
*** include/arm32/pmap.h	2002/01/19 16:55:22	1.20
--- include/arm32/pmap.h	2002/01/31 02:23:30
***************
*** 138,147 ****
  } pv_addr_t;
  
  /*
!  * _KERNEL specific macros, functions and prototypes
   */
  
! #ifdef  _KERNEL
  
  /*
   * Commonly referenced structures
--- 138,163 ----
  } pv_addr_t;
  
  /*
!  * Prototype PTE bits for each VM protection code, both cached
!  * and un-cached, kernel and userland.
   */
+ extern pt_entry_t pte_protos[4][8];
  
! #define	PTE_PROTO_KERNEL	0
! #define	PTE_PROTO_USER		1
! #define	PTE_PROTO_CACHE		0
! #define	PTE_PROTO_NOCACHE	2
! 
! #define	pte_proto(ku, prot, cache)					\
! 	pte_protos[(ku) + (cache)][(prot)]
! 
! #define	pmap_pte_proto(pm, prot, cache)					\
! 	pte_proto((pm == pmap_kernel()) ? PTE_PROTO_KERNEL		\
! 					: PTE_PROTO_USER, (prot), (cache))
! 
! void	pmap_pte_protos_init_arm678(void);
! void	pmap_pte_protos_init_arm9(void);
! void	pmap_pte_protos_init_xscale(void);
  
  /*
   * Commonly referenced structures
***************
*** 181,188 ****
   */
  boolean_t	pmap_pageidlezero __P((paddr_t));
  #define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
- 
- #endif	/* _KERNEL */
  
  /*
   * Useful macros and constants 
--- 197,202 ----
Index: include/arm32/pte.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/pte.h,v
retrieving revision 1.1
diff -c -r1.1 pte.h
*** include/arm32/pte.h	2001/11/23 17:39:04	1.1
--- include/arm32/pte.h	2002/01/31 02:23:30
***************
*** 74,85 ****
  #define PT_C		0x08	/* Phys - Cacheable */
  #define PT_U		0x10	/* Phys - Updateable */
  
- #ifndef _LOCORE
- extern pt_entry_t	pte_cache_mode;
- 
- #define PT_CACHEABLE	(pte_cache_mode)
- #endif
- 
  /* Page R/M attributes (in pmseg.attrs). */
  #define PT_M		0x01	/* Virt - Modified */
  #define PT_H		0x02	/* Virt - Handled (Used) */
--- 74,79 ----
***************
*** 106,115 ****
  
  /* PTE construction macros */
  #define	L2_LPTE(p, a, f)	((p) | PT_AP(a) | L2_LPAGE | (f))
- #define L2_SPTE(p, a, f)	((p) | PT_AP(a) | L2_SPAGE | (f))
- #define L2_PTE(p, a)		L2_SPTE((p), (a), PT_CACHEABLE)
- #define L2_PTE_NC(p, a)		L2_SPTE((p), (a), PT_B)
- #define L2_PTE_NC_NB(p, a)	L2_SPTE((p), (a), 0)
  #define L1_SECPTE(p, a, f)	((p) | ((a) << AP_SECTION_SHIFT) | (f) \
  				| L1_SECTION | PT_U)
  
--- 100,105 ----

--aKl9HDSa9q6Cj3fk--