Subject: Re: Using different cache modes for r/o vs r/w pages
To: None <port-arm@netbsd.org>
From: Jason R Thorpe <thorpej@wasabisystems.com>
List: port-arm
Date: 02/01/2002 16:23:08
--Q0rSlbzrZN6k9QnT
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline

On Fri, Feb 01, 2002 at 09:39:29AM -0800, Jason R Thorpe wrote:

 > Attached are the current pmap diffs.

Ok.  I've updated all the ports to compile with the new changes.

ARM portmasters -- please look over these to make sure I didn't
screw up your port.

I'm getting to the point where I'd like to commit some of this stuff soon.
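
For anyone who hasn't been following along: the basic idea is to replace
the old AP_KRW/PT_CACHEABLE-style PTE construction with per-CPU-class
tables of prototype PTEs.  set_cpufuncs() calls the appropriate
pmap_pte_protos_init_*() routine to fill in the tables, and the mapping
code then picks a prototype by kernel-vs-user, VM_PROT_* protection, and
cache mode.  A rough sketch of the lookup (illustrative only -- the real
macro definitions live in the pmap headers and aren't part of these
diffs; the index encoding and PTE_PROTO_USER are assumed here by analogy
with the call sites):

	/* Prototype-PTE tables, filled in by pmap_pte_protos_init_*(). */
	extern pt_entry_t pte_protos[4][8];

	/* Assumed encoding: row = (class, cache mode), col = VM_PROT_*. */
	#define	pte_proto(class, prot, cache)				\
		(pte_protos[((class) << 1) | (cache)][(prot)])

	/* Pick kernel vs. user prototypes based on the pmap. */
	#define	pmap_pte_proto(pm, prot, cache)				\
		pte_proto((pm) == pmap_kernel() ?			\
		    PTE_PROTO_KERNEL : PTE_PROTO_USER,			\
		    (prot), (cache))

Building a PTE then reduces to OR'ing the prototype bits onto the page
frame address:

	*pte = pa | pmap_pte_proto(pmap_kernel(),
				   VM_PROT_READ|VM_PROT_WRITE,
				   PTE_PROTO_NOCACHE);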

-- 
        -- Jason R. Thorpe <thorpej@wasabisystems.com>

--Q0rSlbzrZN6k9QnT
Content-Type: text/plain; charset=us-ascii
Content-Description: pmap-cache-take5
Content-Disposition: attachment; filename=arm-pmap-diffs

Index: acorn32/acorn32/rpc_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/acorn32/acorn32/rpc_machdep.c,v
retrieving revision 1.10
diff -u -p -r1.10 rpc_machdep.c
--- acorn32/acorn32/rpc_machdep.c	2002/01/25 19:19:22	1.10
+++ acorn32/acorn32/rpc_machdep.c	2002/02/02 00:14:19
@@ -182,16 +182,6 @@ static vaddr_t sa110_cc_base;
 void physcon_display_base	__P((u_int addr));
 extern void consinit		__P((void));
 
-void map_section	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa,
-			     int cacheable));
-void map_pagetable	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry		__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry_nc	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry_ro	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-vm_size_t map_chunk	__P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
-			     vm_offset_t pa, vm_size_t size, u_int acc,
-			     u_int flg));
-
 void data_abort_handler		__P((trapframe_t *frame));
 void prefetch_abort_handler	__P((trapframe_t *frame));
 void undefinedinstruction_bounce	__P((trapframe_t *frame));
@@ -381,18 +371,22 @@ struct l1_sec_map {
 	vm_offset_t	va;
 	vm_offset_t	pa;
 	vm_size_t	size;
-	int		flags;
+	int		prot;
+	int		cache;
 } l1_sec_table[] = {
 	/* Map 1Mb section for VIDC20 */
 	{ VIDC_BASE,		VIDC_HW_BASE,
-	    ONE_MB,		0 },
+	    ONE_MB,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1Mb section from IOMD */
 	{ IOMD_BASE,		IOMD_HW_BASE,
-	    ONE_MB,		0 },
+	    ONE_MB,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1Mb of COMBO (and module space) */
 	{ IO_BASE,		IO_HW_BASE,
-	    ONE_MB,		0 },
-	{ 0, 0, 0, 0 }
+	    ONE_MB,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
+	{ 0, 0, 0, 0, 0 }
 };
 
 
@@ -642,16 +636,16 @@ initarm_new_bootloader(bootconf)
 	l1pagetable = kernel_l1pt.pv_pa;
 
 	/* Map the L2 page tables in the L1 page table */
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_pagetable(l1pagetable, KERNEL_BASE,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE,
 	    kernel_pt_table[KERNEL_PT_KERNEL]);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+		pmap_map_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
-	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    kernel_ptpt.pv_pa);
-	map_pagetable(l1pagetable, VMEM_VBASE,
+	pmap_map_l2pt(l1pagetable, VMEM_VBASE,
 	    kernel_pt_table[KERNEL_PT_VMEM]);
 
 
@@ -669,25 +663,28 @@ initarm_new_bootloader(bootconf)
 	 */
 	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
 #if defined(CPU_ARM6) || defined(CPU_ARM7)
-		logical = map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
-		    physical_start, kernexec->a_text,
-		    AP_KRW, PT_CACHEABLE);
+		logical = pmap_map_chunk(l1pagetable, l2pagetable,
+		    KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #else	/* CPU_ARM6 || CPU_ARM7 */
-		logical = map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
-		    physical_start, kernexec->a_text,
-		    AP_KR, PT_CACHEABLE);
+		logical = pmap_map_chunk(l1pagetable, l2pagetable,
+		    KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
+		    VM_PROT_READ, PTE_PROTO_CACHE);
 #endif	/* CPU_ARM6 || CPU_ARM7 */
-		logical += map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE + logical,
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
+		    KERNEL_TEXT_BASE + logical,
 		    physical_start + logical, kerneldatasize - kernexec->a_text,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 	} else {	/* !ZMAGIC */
 		/*
 		 * Most likely an ELF kernel ...
-		 * XXX no distinction yet between read only and read/write area's ...
+		 * XXX no distinction yet between read only and read/write
+		 * areas ...
+		 * XXXJRT Yes there is.  FIXME.
 		 */
-		map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
+		pmap_map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
 		    physical_start, kerneldatasize,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 	};
 
 
@@ -696,20 +693,24 @@ initarm_new_bootloader(bootconf)
 #endif
 
 	/* Map the stack pages */
-	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
-	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
-	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
-	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
-	    PD_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the page table that maps the kernel pages */
-	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa - physical_start,
-	    kernel_ptpt.pv_pa);
+	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa - physical_start,
+	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 
 	/* Now we fill in the L2 pagetable for the VRAM */
@@ -723,11 +724,14 @@ initarm_new_bootloader(bootconf)
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_VMEM];
 
-	map_chunk(l1pagetable, l2pagetable, VMEM_VBASE, videomemory.vidm_pbase,
-	    videomemory.vidm_size, AP_KRW, PT_CACHEABLE);
-	map_chunk(l1pagetable, l2pagetable, VMEM_VBASE + videomemory.vidm_size,
+	pmap_map_chunk(l1pagetable, l2pagetable, VMEM_VBASE,
+	    videomemory.vidm_pbase,
+	    videomemory.vidm_size, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(l1pagetable, l2pagetable,
+	    VMEM_VBASE + videomemory.vidm_size,
 	    videomemory.vidm_pbase, videomemory.vidm_size,
-	    AP_KRW, PT_CACHEABLE);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 
 	/*
@@ -736,18 +740,23 @@ initarm_new_bootloader(bootconf)
 	 */
 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
 	l2pagetable = kernel_ptpt.pv_pa;
-	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa);
-	map_entry_nc(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_VMEM]);
-	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_SYS]);
+	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_VMEM],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_SYS],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
-		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
+		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
-		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	}
 
 	/*
@@ -755,7 +764,8 @@ initarm_new_bootloader(bootconf)
 	 * of the virtual memory map.
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
-	map_entry(l2pagetable, 0x0000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/* Map the core memory needed before autoconfig */
 	loop = 0;
@@ -768,9 +778,11 @@ initarm_new_bootloader(bootconf)
 			l1_sec_table[loop].va);
 #endif
 		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_SEC_SIZE)
-			map_section(l1pagetable, l1_sec_table[loop].va + sz,
-				l1_sec_table[loop].pa + sz,
-				l1_sec_table[loop].flags);
+			pmap_map_section(l1pagetable,
+			    l1_sec_table[loop].va + sz,
+			    l1_sec_table[loop].pa + sz,
+			    l1_sec_table[loop].prot,
+			    l1_sec_table[loop].cache);
 		++loop;
 	}
 
@@ -1046,7 +1058,8 @@ rpc_sa110_cc_setup(void)
 	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
 	for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
 		pte = pmap_pte(pmap_kernel(), (sa110_cc_base + loop));
-		*pte = L2_PTE(kaddr, AP_KR);
+		*pte = kaddr | pte_proto(PTE_PROTO_KERNEL,
+		    VM_PROT_READ, PTE_PROTO_CACHE);
 	}
 	sa110_cache_clean_addr = sa110_cc_base;
 	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
@@ -1249,10 +1262,12 @@ initarm_old_bootloader(bootconf)
 		 * Now we construct a L2 pagetables for the VRAM
  		 */
 		for (logical = 0; logical < 0x200000; logical += NBPG) {
-			map_entry(l2pagetable + 0x1000, logical,
-			    bootconfig.vram[0].address + logical);
-			map_entry(l2pagetable + 0x1000, logical + 0x200000,
-			    bootconfig.vram[0].address + logical);
+			pmap_map_entry(l2pagetable + 0x1000, logical,
+			    bootconfig.vram[0].address + logical,
+			    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+			pmap_map_entry(l2pagetable + 0x1000, logical + 0x200000,
+			    bootconfig.vram[0].address + logical,
+			    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 		}
 
 		/*
@@ -1272,8 +1287,9 @@ initarm_old_bootloader(bootconf)
  		 */
 		for (logical = 0; logical < bootconfig.display_size;
 		    logical += NBPG) {
-			map_entry(l2pagetable + 0x1000, logical,
-			    bootconfig.display_phys + logical);
+			pmap_map_entry(l2pagetable + 0x1000, logical,
+			    bootconfig.display_phys + logical,
+			    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 		}
 
 		/*
@@ -1292,27 +1308,31 @@ initarm_old_bootloader(bootconf)
 	 */
 	for (logical = 0; logical < kerneldatasize + bootconfig.scratchsize;
 	    logical += NBPG) {
-		map_entry(l2pagetable + 0x3000, logical,
-		    bootconfig.kernphysicalbase + logical);
+		pmap_map_entry(l2pagetable + 0x3000, logical,
+		    bootconfig.kernphysicalbase + logical,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 	}
 
 	for (logical = 0; logical < 0x400000; logical += NBPG) {
-		map_entry(l2pagetable + 0x2000, logical,
-		    bootconfig.dram[0].address + logical);
+		pmap_map_entry(l2pagetable + 0x2000, logical,
+		    bootconfig.dram[0].address + logical,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 	}
 
 	/*
 	 * Now we construct the L1 pagetable. This only needs the minimum to
 	 * keep us going until we can construct the proper kernel L1 page table.
 	 */
-	map_section(l1pagetable, VIDC_BASE,  VIDC_HW_BASE, 0);
-	map_section(l1pagetable, IOMD_BASE,  IOMD_HW_BASE, 0);
+	pmap_map_section(l1pagetable, VIDC_BASE, VIDC_HW_BASE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_section(l1pagetable, IOMD_BASE, IOMD_HW_BASE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    bootconfig.scratchphysicalbase + 0x2000);
-	map_pagetable(l1pagetable, KERNEL_BASE,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE,
 	    bootconfig.scratchphysicalbase + 0x3000);
-	map_pagetable(l1pagetable, VMEM_VBASE,
+	pmap_map_l2pt(l1pagetable, VMEM_VBASE,
 	    bootconfig.scratchphysicalbase + 0x1000);
 
 	/* Print some debugging info */
@@ -1562,16 +1582,16 @@ initarm_old_bootloader(bootconf)
 	l1pagetable = kernel_l1pt.pv_pa - physical_start;
 
 	/* Map the L2 page tables in the L1 page table */
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_pagetable(l1pagetable, KERNEL_BASE,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE,
 	    kernel_pt_table[KERNEL_PT_KERNEL]);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+		pmap_map_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
-	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    kernel_ptpt.pv_pa);
-	map_pagetable(l1pagetable, VMEM_VBASE,
+	pmap_map_l2pt(l1pagetable, VMEM_VBASE,
 	    kernel_pt_table[KERNEL_PT_VMEM]);
 
 
@@ -1589,41 +1609,46 @@ initarm_old_bootloader(bootconf)
 		 * Other ARM 710 and StrongARM processors don't have a problem.
 		 */
 #if defined(CPU_ARM6) || defined(CPU_ARM7)
-		logical = map_chunk(0, l2pagetable, KERNEL_TEXT_BASE,
+		logical = pmap_map_chunk(0, l2pagetable, KERNEL_TEXT_BASE,
 		    physical_start, kernexec->a_text,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #else	/* CPU_ARM6 || CPU_ARM7 */
-		logical = map_chunk(0, l2pagetable, KERNEL_TEXT_BASE,
+		logical = pmap_map_chunk(0, l2pagetable, KERNEL_TEXT_BASE,
 		    physical_start, kernexec->a_text,
-		    AP_KR, PT_CACHEABLE);
+		    VM_PROT_READ, PTE_PROTO_CACHE);
 #endif	/* CPU_ARM6 || CPU_ARM7 */
-		logical += map_chunk(0, l2pagetable, KERNEL_TEXT_BASE + logical,
+		logical += pmap_map_chunk(0, l2pagetable,
+		    KERNEL_TEXT_BASE + logical,
 		    physical_start + logical, kerneldatasize - kernexec->a_text,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 	} else
-		map_chunk(0, l2pagetable, KERNEL_TEXT_BASE,
+		pmap_map_chunk(0, l2pagetable, KERNEL_TEXT_BASE,
 		    physical_start, kerneldatasize,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 #ifdef VERBOSE_INIT_ARM
 	printf("Constructing L2 page tables\n");
 #endif
 
 	/* Map the stack pages */
-	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
-	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
-	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
-	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
-	    PD_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the page table that maps the kernel pages */
-	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa - physical_start,
-	    kernel_ptpt.pv_pa);
+	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa - physical_start,
+	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Now we fill in the L2 pagetable for the VRAM */
 
@@ -1636,11 +1661,12 @@ initarm_old_bootloader(bootconf)
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_VMEM] - physical_start;
 
-	map_chunk(0, l2pagetable, VMEM_VBASE, videomemory.vidm_pbase,
-	    videomemory.vidm_size, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, VMEM_VBASE + videomemory.vidm_size,
+	pmap_map_chunk(0, l2pagetable, VMEM_VBASE, videomemory.vidm_pbase,
+	    videomemory.vidm_size, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, VMEM_VBASE + videomemory.vidm_size,
 	    videomemory.vidm_pbase, videomemory.vidm_size,
-	    AP_KRW, PT_CACHEABLE);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -1648,18 +1674,23 @@ initarm_old_bootloader(bootconf)
 	 */
 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
 	l2pagetable = kernel_ptpt.pv_pa - physical_start;
-	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa);
-	map_entry_nc(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_VMEM]);
-	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_SYS]);
+	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_VMEM],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_SYS],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
-		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
+		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
-		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	}
 
 	/*
@@ -1667,18 +1698,22 @@ initarm_old_bootloader(bootconf)
 	 * of the virtual memory map.
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS] - physical_start;
-	map_entry(l2pagetable, 0x0000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/* Map the VIDC20, IOMD, COMBO and podules */
 
 	/* Map the VIDC20 */
-	map_section(l1pagetable, VIDC_BASE, VIDC_HW_BASE, 0);
+	pmap_map_section(l1pagetable, VIDC_BASE, VIDC_HW_BASE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the IOMD (and SLOW and MEDIUM simple podules) */
-	map_section(l1pagetable, IOMD_BASE, IOMD_HW_BASE, 0);
+	pmap_map_section(l1pagetable, IOMD_BASE, IOMD_HW_BASE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the COMBO (and module space) */
-	map_section(l1pagetable, IO_BASE, IO_HW_BASE, 0);
+	pmap_map_section(l1pagetable, IO_BASE, IO_HW_BASE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Bit more debugging info */
 /*	printf("page tables look like this ...\n");
Index: acorn32/podulebus/esc.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/acorn32/podulebus/esc.c,v
retrieving revision 1.4
diff -u -p -r1.4 esc.c
--- acorn32/podulebus/esc.c	2002/01/25 19:19:23	1.4
+++ acorn32/podulebus/esc.c	2002/02/02 00:14:35
@@ -206,7 +206,9 @@ escinitialize(dev)
  * every time we need "bumped" transfer.
  */
 	pte = pmap_pte(pmap_kernel(), (vm_offset_t)dev->sc_bump_va);
-	*pte &= ~PT_C;
+	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap_kernel(),
+						  VM_PROT_READ|VM_PROT_WRITE,
+						  PTE_PROTO_NOCACHE);
 	cpu_tlb_flushD();
 	cpu_dcache_wbinv_range((vm_offset_t)dev->sc_bump_va, NBPG);
 
Index: acorn32/podulebus/podulebus.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/acorn32/podulebus/podulebus.c,v
retrieving revision 1.4
diff -u -p -r1.4 podulebus.c
--- acorn32/podulebus/podulebus.c	2001/11/27 00:53:12	1.4
+++ acorn32/podulebus/podulebus.c	2002/02/02 00:14:42
@@ -69,7 +69,6 @@ extern struct bus_space podulebus_bs_tag
 
 /* Declare prototypes */
 
-void map_section __P((vm_offset_t, vm_offset_t, vm_offset_t, int cacheable));
 u_int poduleread __P((u_int, int));
 
 
@@ -436,8 +435,9 @@ podulebusattach(parent, self, aux)
 
 	/* Map the FAST and SYNC simple podules */
 
-	map_section((vm_offset_t)pmap_kernel()->pm_pdir,
-	    SYNC_PODULE_BASE & 0xfff00000, SYNC_PODULE_HW_BASE & 0xfff00000, 0);
+	pmap_map_section((vm_offset_t)pmap_kernel()->pm_pdir,
+	    SYNC_PODULE_BASE & 0xfff00000, SYNC_PODULE_HW_BASE & 0xfff00000,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	cpu_tlb_flushD();
 
 	/* Now map the EASI space */
@@ -447,8 +447,9 @@ podulebusattach(parent, self, aux)
         
 		for (loop1 = loop * EASI_SIZE; loop1 < ((loop + 1) * EASI_SIZE);
 		    loop1 += L1_SEC_SIZE)
-		map_section((vm_offset_t)pmap_kernel()->pm_pdir, EASI_BASE + loop1,
-		    EASI_HW_BASE + loop1, 0);
+		pmap_map_section((vm_offset_t)pmap_kernel()->pm_pdir, EASI_BASE + loop1,
+		    EASI_HW_BASE + loop1,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	}
 	cpu_tlb_flushD();
 
Index: acorn32/podulebus/sfas.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/acorn32/podulebus/sfas.c,v
retrieving revision 1.4
diff -u -p -r1.4 sfas.c
--- acorn32/podulebus/sfas.c	2002/01/25 19:19:23	1.4
+++ acorn32/podulebus/sfas.c	2002/02/02 00:14:56
@@ -207,7 +207,9 @@ sfasinitialize(dev)
  * every time we need "bumped" transfer.
  */
 	pte = pmap_pte(pmap_kernel(), (vm_offset_t)dev->sc_bump_va);
-	*pte &= ~(PT_C | PT_B);
+	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap_kernel(),
+						  VM_PROT_READ|VM_PROT_WRITE,
+						  PTE_PROTO_NOCACHE);
 	cpu_tlb_flushD();
 	cpu_dcache_wbinv_range((vm_offset_t)dev->sc_bump_va, NBPG);
 
Index: arm/arm/cpufunc.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm/cpufunc.c,v
retrieving revision 1.29
diff -u -p -r1.29 cpufunc.c
--- arm/arm/cpufunc.c	2002/01/30 00:37:18	1.29
+++ arm/arm/cpufunc.c	2002/02/02 00:15:12
@@ -53,6 +53,9 @@
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/systm.h>
+
+#include <uvm/uvm_extern.h>
+
 #include <machine/cpu.h>
 #include <machine/bootconfig.h>
 #include <arch/arm/arm/disassem.h>
@@ -650,6 +653,7 @@ set_cpufuncs()
 		cpu_reset_needs_v4_MMU_disable = 0;
 		/* XXX Cache info? */
 		arm_dcache_align_mask = -1;
+		pmap_pte_protos_init_arm678();
 		return 0;
 	}
 #endif	/* CPU_ARM6 */
@@ -661,6 +665,7 @@ set_cpufuncs()
 		cpu_reset_needs_v4_MMU_disable = 0;
 		/* XXX Cache info? */
 		arm_dcache_align_mask = -1;
+		pmap_pte_protos_init_arm678();
 		return 0;
 	}
 #endif	/* CPU_ARM7 */
@@ -671,6 +676,7 @@ set_cpufuncs()
 		cpufuncs = arm7tdmi_cpufuncs;
 		cpu_reset_needs_v4_MMU_disable = 0;
 		get_cachetype();
+		pmap_pte_protos_init_arm678();
 		return 0;
 	}
 #endif	
@@ -680,15 +686,16 @@ set_cpufuncs()
 		cpufuncs = arm8_cpufuncs;
 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
 		get_cachetype();
+		pmap_pte_protos_init_arm678();
 		return 0;
 	}
 #endif	/* CPU_ARM8 */
 #ifdef CPU_ARM9
 	if (cputype == CPU_ID_ARM920T) {
-		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
 		cpufuncs = arm9_cpufuncs;
 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
 		get_cachetype();
+		pmap_pte_protos_init_arm9();
 		return 0;
 	}
 #endif /* CPU_ARM9 */
@@ -698,6 +705,7 @@ set_cpufuncs()
 		cpufuncs = sa110_cpufuncs;
 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
 		get_cachetype();
+		pmap_pte_protos_init_arm678();		/* XXX */
 		/*
 		 * Enable the right variant of sleeping.
 		 */
@@ -743,7 +751,6 @@ set_cpufuncs()
 			:
 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
 
-		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
 		cpufuncs = xscale_cpufuncs;
 
 		/*
@@ -758,6 +765,7 @@ set_cpufuncs()
 
 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
 		get_cachetype();
+		pmap_pte_protos_init_xscale();
 		return 0;
 	}
 #endif /* CPU_XSCALE */
Index: arm/arm32/arm32_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/arm32_machdep.c,v
retrieving revision 1.11
diff -u -p -r1.11 arm32_machdep.c
--- arm/arm32/arm32_machdep.c	2002/01/20 03:41:47	1.11
+++ arm/arm32/arm32_machdep.c	2002/02/02 00:15:18
@@ -150,177 +150,6 @@ bootsync(void)
 }
 
 /*
- * A few functions that are used to help construct the page tables
- * during the bootstrap process.
- */
-
-void
-map_section(pagetable, va, pa, cacheable)
-	vaddr_t pagetable;
-	vaddr_t va;
-	paddr_t pa;
-	int cacheable;
-{
-#ifdef	DIAGNOSTIC
-	if (((va | pa) & (L1_SEC_SIZE - 1)) != 0)
-		panic("initarm: Cannot allocate 1MB section on non 1MB boundry\n");
-#endif	/* DIAGNOSTIC */
-
-	if (cacheable)
-		((u_int *)pagetable)[(va >> PDSHIFT)] =
-		    L1_SEC((pa & PD_MASK), pte_cache_mode);
-	else
-		((u_int *)pagetable)[(va >> PDSHIFT)] =
-		    L1_SEC((pa & PD_MASK), 0);
-}
-
-
-void
-map_pagetable(pagetable, va, pa)
-	vaddr_t pagetable;
-	vaddr_t va;
-	paddr_t pa;
-{
-#ifdef	DIAGNOSTIC
-	if ((pa & 0xc00) != 0)
-		panic("pagetables should be group allocated on pageboundry");
-#endif	/* DIAGNOSTIC */
-
-	((u_int *)pagetable)[(va >> PDSHIFT) + 0] =
-	     L1_PTE((pa & PG_FRAME) + 0x000);
-	((u_int *)pagetable)[(va >> PDSHIFT) + 1] =
-	     L1_PTE((pa & PG_FRAME) + 0x400);
-	((u_int *)pagetable)[(va >> PDSHIFT) + 2] =
-	     L1_PTE((pa & PG_FRAME) + 0x800);
-	((u_int *)pagetable)[(va >> PDSHIFT) + 3] =
-	     L1_PTE((pa & PG_FRAME) + 0xc00);
-}
-
-/* cats kernels have a 2nd l2 pt, so the range is bigger hence the 0x7ff etc */
-vsize_t
-map_chunk(pd, pt, va, pa, size, acc, flg)
-	vaddr_t pd;
-	vaddr_t pt;
-	vaddr_t va;
-	paddr_t pa;
-	vsize_t size;
-	u_int acc;
-	u_int flg;
-{
-	pd_entry_t *l1pt = (pd_entry_t *)pd;
-	pt_entry_t *l2pt = (pt_entry_t *)pt;
-	vsize_t remain;
-	u_int loop;
-
-	remain = (size + (NBPG - 1)) & ~(NBPG - 1);
-#ifdef VERBOSE_INIT_ARM
-	printf("map_chunk: pa=%lx va=%lx sz=%lx rem=%lx acc=%x flg=%x\n",
-	    pa, va, size, remain, acc, flg);
-	printf("map_chunk: ");
-#endif
-	size = remain;
-
-	while (remain > 0) {
-		/* Can we do a section mapping ? */
-		if (l1pt && !((pa | va) & (L1_SEC_SIZE - 1))
-		    && remain >= L1_SEC_SIZE) {
-#ifdef VERBOSE_INIT_ARM
-			printf("S");
-#endif
-			l1pt[(va >> PDSHIFT)] = L1_SECPTE(pa, acc, flg);
-			va += L1_SEC_SIZE;
-			pa += L1_SEC_SIZE;
-			remain -= L1_SEC_SIZE;
-		} else
-		/* Can we do a large page mapping ? */
-		if (!((pa | va) & (L2_LPAGE_SIZE - 1))
-		    && (remain >= L2_LPAGE_SIZE)) {
-#ifdef VERBOSE_INIT_ARM
-			printf("L");
-#endif
-			for (loop = 0; loop < 16; ++loop)
-#ifndef cats
-				l2pt[((va >> PGSHIFT) & 0x3f0) + loop] =
-				    L2_LPTE(pa, acc, flg);
-#else
-				l2pt[((va >> PGSHIFT) & 0x7f0) + loop] =
-				    L2_LPTE(pa, acc, flg);
-#endif	
-			va += L2_LPAGE_SIZE;
-			pa += L2_LPAGE_SIZE;
-			remain -= L2_LPAGE_SIZE;
-		} else
-		/* All we can do is a small page mapping */
-		{
-#ifdef VERBOSE_INIT_ARM
-			printf("P");
-#endif
-#ifndef cats			
-			l2pt[((va >> PGSHIFT) & 0x3ff)] = L2_SPTE(pa, acc, flg);
-#else
-			l2pt[((va >> PGSHIFT) & 0x7ff)] = L2_SPTE(pa, acc, flg);
-#endif
-			va += NBPG;
-			pa += NBPG;
-			remain -= NBPG;
-		}
-	}
-#ifdef VERBOSE_INIT_ARM
-	printf("\n");
-#endif
-	return(size);
-}
-
-/* cats versions have larger 2 l2pt's next to each other */
-void
-map_entry(pagetable, va, pa)
-	vaddr_t pagetable;
-	vaddr_t va;
-	paddr_t pa;
-{
-#ifndef cats
-	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
-	    L2_PTE((pa & PG_FRAME), AP_KRW);
-#else
-	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
-	    L2_PTE((pa & PG_FRAME), AP_KRW);
-#endif	
-}
-
-
-void
-map_entry_nc(pagetable, va, pa)
-	vaddr_t pagetable;
-	vaddr_t va;
-	paddr_t pa;
-{
-#ifndef cats
-	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
-	    L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
-#else
-	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
-	    L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
-#endif
-}
-
-
-void
-map_entry_ro(pagetable, va, pa)
-	vaddr_t pagetable;
-	vaddr_t va;
-	paddr_t pa;
-{
-#ifndef cats
-	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
-	    L2_PTE((pa & PG_FRAME), AP_KR);
-#else
-	((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
-	    L2_PTE((pa & PG_FRAME), AP_KR);
-#endif
-}
-
-
-/*
  * void cpu_startup(void)
  *
  * Machine dependant startup code. 
@@ -486,8 +315,12 @@ cpu_startup()
 void
 zero_page_readonly()
 {
+
+	/* XXXJRT Do we really care about caching page0?! */
 	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
-	    L2_PTE((systempage.pv_pa & PG_FRAME), AP_KR));
+	    systempage.pv_pa | pte_proto(PTE_PROTO_KERNEL,
+					 VM_PROT_READ,
+					 PTE_PROTO_CACHE));
 	cpu_tlb_flushID_SE(0x00000000);
 }
 
@@ -502,8 +335,12 @@ zero_page_readonly()
 void
 zero_page_readwrite()
 {
+
+	/* XXXJRT See above. */
 	WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
-	    L2_PTE((systempage.pv_pa & PG_FRAME), AP_KRW));
+	    systempage.pv_pa | pte_proto(PTE_PROTO_KERNEL,
+					 VM_PROT_READ|VM_PROT_WRITE,
+					 PTE_PROTO_CACHE));
 	cpu_tlb_flushID_SE(0x00000000);
 }
 
@@ -618,5 +455,3 @@ parse_mi_bootargs(args)
 		if (integer)
 			boothowto |= AB_VERBOSE;
 }
-
-/* End of machdep.c */
Index: arm/arm32/bus_dma.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/bus_dma.c,v
retrieving revision 1.8
diff -u -p -r1.8 bus_dma.c
--- arm/arm32/bus_dma.c	2002/01/25 20:57:41	1.8
+++ arm/arm32/bus_dma.c	2002/02/02 00:15:25
@@ -547,7 +547,10 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma
 				cpu_dcache_wbinv_range(va, NBPG);
 				cpu_drain_writebuf();
 				ptep = vtopte(va);
-				*ptep = ((*ptep) & (~PT_C | PT_B));
+				*ptep = (*ptep & PG_FRAME) |
+				    pmap_pte_proto(pmap_kernel(),
+						   VM_PROT_READ|VM_PROT_WRITE,
+						   PTE_PROTO_NOCACHE);
 				tlb_flush();
 			}
 #ifdef DEBUG_DMA
Index: arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.36
diff -u -p -r1.36 pmap.c
--- arm/arm32/pmap.c	2002/01/25 19:19:25	1.36
+++ arm/arm32/pmap.c	2002/02/02 00:16:02
@@ -1,6 +1,7 @@
 /*	$NetBSD: pmap.c,v 1.36 2002/01/25 19:19:25 thorpej Exp $	*/
 
 /*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
  * Copyright (c) 2001 Richard Earnshaw
  * Copyright (c) 2001 Christopher Gilbert
  * All rights reserved.
@@ -193,6 +194,7 @@ pt_entry_t msgbufpte;
 extern caddr_t msgbufaddr;
 
 boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
+
 /*
  * locking data structures
  */
@@ -241,7 +243,7 @@ static struct pv_entry	*pmap_alloc_pv __
 #define ALLOCPV_TRY	1	/* just try to allocate, don't steal */
 #define ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
 static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
-static void		 pmap_enter_pv __P((struct pv_head *,
+static void		 pmap_enter_pv __P((struct vm_page *,
 					    struct pv_entry *, struct pmap *,
 					    vaddr_t, struct vm_page *, int));
 static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
@@ -249,27 +251,26 @@ static void		 pmap_free_pvs __P((struct 
 static void		 pmap_free_pv_doit __P((struct pv_entry *));
 static void		 pmap_free_pvpage __P((void));
 static boolean_t	 pmap_is_curpmap __P((struct pmap *));
-static struct pv_entry	*pmap_remove_pv __P((struct pv_head *, struct pmap *, 
+static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *, 
 			vaddr_t));
 #define PMAP_REMOVE_ALL		0	/* remove all mappings */
 #define PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */
 
-static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct pv_head *,
+static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
 	u_int, u_int));
 
 static void pmap_free_l1pt __P((struct l1pt *));
 static int pmap_allocpagedir __P((struct pmap *));
 static int pmap_clean_page __P((struct pv_entry *, boolean_t));
-static struct pv_head *pmap_find_pvh __P((paddr_t));
-static void pmap_remove_all __P((paddr_t));
+static void pmap_remove_all __P((struct vm_page *));
 
 
 vsize_t npages;
 
 static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t, boolean_t));
 static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t, boolean_t));
-__inline static void pmap_clearbit __P((paddr_t, unsigned int));
-__inline static boolean_t pmap_testbit __P((paddr_t, unsigned int));
+__inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
+__inline static boolean_t pmap_testbit __P((struct vm_page *, unsigned int));
 
 extern paddr_t physical_start;
 extern paddr_t physical_freestart;
@@ -303,7 +304,7 @@ int l1pt_reuse_count;			/* stat - L1's r
 
 /* Local function prototypes (not used outside this file) */
 pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
-void pmap_copy_on_write __P((paddr_t pa));
+void pmap_copy_on_write __P((struct vm_page *));
 void pmap_pinit __P((struct pmap *));
 void pmap_freepagedir __P((struct pmap *));
 
@@ -318,19 +319,21 @@ static __inline void pmap_map_in_l1 __P(
 static pt_entry_t *pmap_map_ptes __P((struct pmap *));
 static void pmap_unmap_ptes __P((struct pmap *));
 
-__inline static void pmap_vac_me_harder __P((struct pmap *, struct pv_head *,
+__inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
     pt_entry_t *, boolean_t));
-static void pmap_vac_me_kpmap __P((struct pmap *, struct pv_head *,
+static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
     pt_entry_t *, boolean_t));
-static void pmap_vac_me_user __P((struct pmap *, struct pv_head *,
+static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
     pt_entry_t *, boolean_t));
 
 /*
- * Cache enable bits in PTE to use on pages that are cacheable.
- * On most machines this is cacheable/bufferable, but on some, eg arm10, we
- * can chose between write-through and write-back cacheing.
+ * Prototype PTE and L1 section descriptor arrays.  These are initialized
+ * in pmap_pte_protos_init_*().
  */
-pt_entry_t pte_cache_mode = (PT_C | PT_B);
+pt_entry_t pte_protos[4][8];
+pd_entry_t l1sec_protos[2][8];
+pt_entry_t lpte_protos[2][8];
+pd_entry_t pde_proto;
 
 /*
  * real definition of pv_entry.
@@ -799,15 +802,15 @@ pmap_free_pvpage()
 
 /*
  * main pv_entry manipulation functions:
- *   pmap_enter_pv: enter a mapping onto a pv_head list
- *   pmap_remove_pv: remove a mappiing from a pv_head list
+ *   pmap_enter_pv: enter a mapping onto a vm_page list
+ *   pmap_remove_pv: remove a mapping from a vm_page list
  *
  * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
  */
 
 /*
- * pmap_enter_pv: enter a mapping onto a pv_head lst
+ * pmap_enter_pv: enter a mapping onto a vm_page list
  *
  * => caller should hold the proper lock on pmap_main_lock
  * => caller should have pmap locked
@@ -817,8 +820,8 @@ pmap_free_pvpage()
  */
 
 __inline static void
-pmap_enter_pv(pvh, pve, pmap, va, ptp, flags)
-	struct pv_head *pvh;
+pmap_enter_pv(pg, pve, pmap, va, ptp, flags)
+	struct vm_page *pg;
 	struct pv_entry *pve;	/* preallocated pve for us to use */
 	struct pmap *pmap;
 	vaddr_t va;
@@ -829,10 +832,12 @@ pmap_enter_pv(pvh, pve, pmap, va, ptp, f
 	pve->pv_va = va;
 	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
 	pve->pv_flags = flags;
-	simple_lock(&pvh->pvh_lock);		/* lock pv_head */
-	pve->pv_next = pvh->pvh_list;		/* add to ... */
-	pvh->pvh_list = pve;			/* ... locked list */
-	simple_unlock(&pvh->pvh_lock);		/* unlock, done! */
+
+	simple_lock(&pg->mdpage.pvh_slock);	/* lock pv_head */
+	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
+	pg->mdpage.pvh_list = pve;		/* ... locked list */
+	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
+
 	if (pve->pv_flags & PT_W)
 		++pmap->pm_stats.wired_count;
 }
@@ -849,14 +854,14 @@ pmap_enter_pv(pvh, pve, pmap, va, ptp, f
  */
 
 __inline static struct pv_entry *
-pmap_remove_pv(pvh, pmap, va)
-	struct pv_head *pvh;
+pmap_remove_pv(pg, pmap, va)
+	struct vm_page *pg;
 	struct pmap *pmap;
 	vaddr_t va;
 {
 	struct pv_entry *pve, **prevptr;
 
-	prevptr = &pvh->pvh_list;		/* previous pv_entry pointer */
+	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
 	pve = *prevptr;
 	while (pve) {
 		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
@@ -872,7 +877,6 @@ pmap_remove_pv(pvh, pmap, va)
 }
 
 /*
- *
  * pmap_modify_pv: Update pv flags
  *
  * => caller should hold lock on pv_head [so that attrs can be adjusted]
@@ -886,10 +890,10 @@ pmap_remove_pv(pvh, pmap, va)
 
 /*__inline */ 
 static u_int
-pmap_modify_pv(pmap, va, pvh, bic_mask, eor_mask)
+pmap_modify_pv(pmap, va, pg, bic_mask, eor_mask)
 	struct pmap *pmap;
 	vaddr_t va;
-	struct pv_head *pvh;
+	struct vm_page *pg;
 	u_int bic_mask;
 	u_int eor_mask;
 {
@@ -900,7 +904,7 @@ pmap_modify_pv(pmap, va, pvh, bic_mask, 
 	 * There is at least one VA mapping this page.
 	 */
 
-	for (npv = pvh->pvh_list; npv; npv = npv->pv_next) {
+	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
 		if (pmap == npv->pv_pmap && va == npv->pv_va) {
 			oflags = npv->pv_flags;
 			npv->pv_flags = flags =
@@ -933,22 +937,27 @@ pmap_map_in_l1(pmap, va, l2pa, selfref)
 	/* Calculate the index into the L1 page table. */
 	ptva = (va >> PDSHIFT) & ~3;
 
-	PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
-	    pmap->pm_pdir, L1_PTE(l2pa), ptva));
+	PDEBUG(0, printf("wiring %08lx in to pd=%p pte=0x%lx va=0x%lx\n",
+	    l2pa, pmap->pm_pdir, l2pa | pde_proto, ptva));
 
 	/* Map page table into the L1. */
-	pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
-	pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
-	pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
-	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
+	pmap->pm_pdir[ptva + 0] = (l2pa + 0x000) | pde_proto;
+	pmap->pm_pdir[ptva + 1] = (l2pa + 0x400) | pde_proto;
+	pmap->pm_pdir[ptva + 2] = (l2pa + 0x800) | pde_proto;
+	pmap->pm_pdir[ptva + 3] = (l2pa + 0xc00) | pde_proto;
 
 	PDEBUG(0, printf("pt self reference %lx in %lx\n",
-	    L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
+	    l2pa | pmap_pte_proto(pmap_kernel(),
+				  VM_PROT_READ|VM_PROT_WRITE,
+				  PTE_PROTO_NOCACHE),
+			 pmap->pm_vptpt));
 
 	/* Map the page table into the page table area. */
 	if (selfref) {
 		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
-			L2_PTE_NC_NB(l2pa, AP_KRW);
+		    l2pa | pmap_pte_proto(pmap_kernel(),
+					  VM_PROT_READ|VM_PROT_WRITE,
+					  PTE_PROTO_NOCACHE);
 	}
 	/* XXX should be a purge */
 /*	cpu_tlb_flushD();*/
@@ -1146,26 +1155,6 @@ pmap_bootstrap(kernel_l1pt, kernel_ptpt)
 	TAILQ_INIT(&pv_unusedpgs);
 
 	/*
-	 * compute the number of pages we have and then allocate RAM
-	 * for each pages' pv_head and saved attributes.
-	 */
-	{
-	       	int npages, lcv;
-		vsize_t s;
-
-		npages = 0;
-		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
-			npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
-		s = (vsize_t) (sizeof(struct pv_head) * npages +
-				sizeof(char) * npages);
-		s = round_page(s); /* round up */
-		boot_head = (char *)uvm_pageboot_alloc(s);
-		bzero((char *)boot_head, s);
-		if (boot_head == 0)
-			panic("pmap_init: unable to allocate pv_heads");
-	}
-	
-	/*
 	 * initialize the pmap pool.
 	 */
 
@@ -1188,11 +1177,6 @@ extern int physmem;
 void
 pmap_init()
 {
-	int lcv, i;
-    
-#ifdef MYCROFT_HACK
-	printf("physmem = %d\n", physmem);
-#endif
 
 	/*
 	 * Set the available memory vars - These do not map to real memory
@@ -1204,25 +1188,6 @@ pmap_init()
 	avail_start = 0;
 	avail_end = physmem * NBPG;
 
-	/* allocate pv_head stuff first */
-	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
-		vm_physmem[lcv].pmseg.pvhead = (struct pv_head *)boot_head;
-		boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.pvhead +
-				 (vm_physmem[lcv].end - vm_physmem[lcv].start));
-		for (i = 0;
-		     i < (vm_physmem[lcv].end - vm_physmem[lcv].start); i++) {
-			simple_lock_init(
-			    &vm_physmem[lcv].pmseg.pvhead[i].pvh_lock);
-		}
-	}
-
-	/* now allocate attrs */
-	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
-		vm_physmem[lcv].pmseg.attrs = (char *) boot_head;
-		boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.attrs +
-				 (vm_physmem[lcv].end - vm_physmem[lcv].start));
-	}
-
 	/*
 	 * now we need to free enough pv_entry structures to allow us to get
 	 * the kmem_map/kmem_object allocated and inited (done after this
@@ -1238,14 +1203,6 @@ pmap_init()
 	pv_nfpvents = 0;
 	(void) pmap_add_pvpage(pv_initpage, FALSE);
 
-#ifdef MYCROFT_HACK
-	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
-		printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
-		    lcv,
-		    vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
-		    vm_physmem[lcv].start, vm_physmem[lcv].end);
-	}
-#endif
 	pmap_initialized = TRUE;
     
 	/* Initialise our L1 page table queues and counters */
@@ -1392,7 +1349,11 @@ pmap_alloc_l1pt(void)
 
 		/* Revoke cacheability and bufferability */
 		/* XXX should be done better than this */
-		ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
+		ptes[arm_byte_to_page(va)] =
+		    (ptes[arm_byte_to_page(va)] & PG_FRAME) |
+		    pmap_pte_proto(pmap_kernel(),
+				   VM_PROT_READ|VM_PROT_WRITE,
+				   PTE_PROTO_NOCACHE);
 
 		va += NBPG;
 		m = m->pageq.tqe_next;
@@ -1506,7 +1467,9 @@ pmap_allocpagedir(pmap)
 	/* Revoke cacheability and bufferability */
 	/* XXX should be done better than this */
 	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
-	*pte = *pte & ~(PT_C | PT_B);
+	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap_kernel(),
+						  VM_PROT_READ|VM_PROT_WRITE,
+						  PTE_PROTO_NOCACHE);
 
 	/* Wire in this page table */
 	pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
@@ -1836,25 +1799,6 @@ pmap_clean_page(pv, is_src)
 }
 
 /*
- * pmap_find_pv()
- *
- * This is a local function that finds a PV head for a given physical page.
- * This is a common op, and this function removes loads of ifdefs in the code.
- */
-static __inline struct pv_head *
-pmap_find_pvh(phys)
-	paddr_t phys;
-{
-	int bank, off;
-	struct pv_head *pvh;
-
-	if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
-		panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
-	pvh = &vm_physmem[bank].pmseg.pvhead[off];
-	return (pvh);
-}
-
-/*
  * pmap_zero_page()
  * 
  * Zero a given physical page by mapping it at a page hook point.
@@ -1866,22 +1810,29 @@ void
 pmap_zero_page(phys)
 	paddr_t phys;
 {
-	struct pv_head *pvh;
+	struct vm_page *pg;
 
 	/* Get an entry for this page, and clean it. */
-	pvh = pmap_find_pvh(phys);
-	simple_lock(&pvh->pvh_lock);
-	pmap_clean_page(pvh->pvh_list, FALSE);
-	simple_unlock(&pvh->pvh_lock);
+	pg = PHYS_TO_VM_PAGE(phys);
+	simple_lock(&pg->mdpage.pvh_slock);
+	pmap_clean_page(pg->mdpage.pvh_list, FALSE);
+	simple_unlock(&pg->mdpage.pvh_slock);
 	
 	/*
 	 * Hook in the page, zero it, and purge the cache for that
 	 * zeroed page. Invalidate the TLB as needed.
 	 */
-	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
+	KDASSERT((phys & PG_FRAME) == phys);
+	*page_hook0.pte = phys |
+	    pmap_pte_proto(pmap_kernel(),
+			   VM_PROT_READ|VM_PROT_WRITE,
+			   PTE_PROTO_CACHE);
 	cpu_tlb_flushD_SE(page_hook0.va);
+
 	cpu_cpwait();
+
 	bzero_page(page_hook0.va);
+
 	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
 }
 
@@ -1899,10 +1850,10 @@ pmap_pageidlezero(phys)
 	boolean_t rv = TRUE;
 	
 #ifdef DIAGNOSTIC
-	struct pv_head *pvh;
+	struct vm_page *pg;
 	
-	pvh = pmap_find_pvh(phys);
-	if (pvh->pvh_list != NULL)
+	pg = PHYS_TO_VM_PAGE(phys);
+	if (pg->mdpage.pvh_list != NULL)
 		panic("pmap_pageidlezero: zeroing mapped page\n");
 #endif
 	
@@ -1910,7 +1861,11 @@ pmap_pageidlezero(phys)
 	 * Hook in the page, zero it, and purge the cache for that
 	 * zeroed page. Invalidate the TLB as needed.
 	 */
-	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
+	KDASSERT((phys & PG_FRAME) == phys);
+	*page_hook0.pte = phys |
+	    pmap_pte_proto(pmap_kernel(),
+			   VM_PROT_READ|VM_PROT_WRITE,
+			   PTE_PROTO_CACHE);
 	cpu_tlb_flushD_SE(page_hook0.va);
 	cpu_cpwait();
 
@@ -1950,33 +1905,45 @@ pmap_copy_page(src, dest)
 	paddr_t src;
 	paddr_t dest;
 {
-	struct pv_head *src_pvh, *dest_pvh;
+	struct vm_page *src_pg, *dest_pg;
 	boolean_t cleanedcache;
 	
 	/* Get PV entries for the pages, and clean them if needed. */
-	src_pvh = pmap_find_pvh(src);
+	src_pg = PHYS_TO_VM_PAGE(src);
 	
-	simple_lock(&src_pvh->pvh_lock);
-	cleanedcache = pmap_clean_page(src_pvh->pvh_list, TRUE);
-	simple_unlock(&src_pvh->pvh_lock);
+	simple_lock(&src_pg->mdpage.pvh_slock);
+	cleanedcache = pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
+	simple_unlock(&src_pg->mdpage.pvh_slock);
 
 	if (cleanedcache == 0) { 
-		dest_pvh = pmap_find_pvh(dest);
-		simple_lock(&dest_pvh->pvh_lock);
-		pmap_clean_page(dest_pvh->pvh_list, FALSE);
-		simple_unlock(&dest_pvh->pvh_lock);
+		dest_pg = PHYS_TO_VM_PAGE(dest);
+		simple_lock(&dest_pg->mdpage.pvh_slock);
+		pmap_clean_page(dest_pg->mdpage.pvh_list, FALSE);
+		simple_unlock(&dest_pg->mdpage.pvh_slock);
 	}
 	/*
 	 * Map the pages into the page hook points, copy them, and purge
 	 * the cache for the appropriate page. Invalidate the TLB
 	 * as required.
 	 */
-	*page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
-	*page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
+	KDASSERT((src & PG_FRAME) == src);
+	*page_hook0.pte = src |		/* XXX should be r/o */
+	    pmap_pte_proto(pmap_kernel(),
+			   VM_PROT_READ|VM_PROT_WRITE,
+			   PTE_PROTO_CACHE);
 	cpu_tlb_flushD_SE(page_hook0.va);
+
+	KDASSERT((dest & PG_FRAME) == dest);
+	*page_hook1.pte = dest |
+	    pmap_pte_proto(pmap_kernel(),
+			   VM_PROT_READ|VM_PROT_WRITE,
+			   PTE_PROTO_CACHE);
 	cpu_tlb_flushD_SE(page_hook1.va);
+
 	cpu_cpwait();
+
 	bcopy_page(page_hook0.va, page_hook1.va);
+
 	cpu_dcache_wbinv_range(page_hook0.va, NBPG);
 	cpu_dcache_wbinv_range(page_hook1.va, NBPG);
 }
@@ -2069,17 +2036,17 @@ pmap_pte_delref(pmap, va)
  * Note that the pmap must have it's ptes mapped in, and passed with ptes.
  */
 __inline static void
-pmap_vac_me_harder(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
+pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
 	boolean_t clear_cache)
 {
 	if (pmap == pmap_kernel())
-		pmap_vac_me_kpmap(pmap, pvh, ptes, clear_cache);
+		pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
 	else
-		pmap_vac_me_user(pmap, pvh, ptes, clear_cache);
+		pmap_vac_me_user(pmap, pg, ptes, clear_cache);
 }
 
 static void
-pmap_vac_me_kpmap(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
+pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
 	boolean_t clear_cache)
 {
 	int user_entries = 0;
@@ -2101,7 +2068,7 @@ pmap_vac_me_kpmap(struct pmap *pmap, str
 	 * this page.  Calculate whether there are user-writable or
 	 * kernel-writable pages.
 	 */
-	for (pv = pvh->pvh_list; pv != NULL; pv = pv->pv_next) {
+	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
 		if (pv->pv_pmap != pmap) {
 			user_entries++;
 			if (pv->pv_flags & PT_Wr)
@@ -2132,7 +2099,7 @@ pmap_vac_me_kpmap(struct pmap *pmap, str
 		 * might not be set correctly, call pmap_vac_me_user
 		 * to recalculate the settings.
 		 */
-		for (pv = pvh->pvh_list; pv; pv = pv->pv_next) {
+		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
 			/* 
 			 * We know kernel mappings will get set
 			 * correctly in other calls.  We also know
@@ -2167,7 +2134,7 @@ pmap_vac_me_kpmap(struct pmap *pmap, str
 			pmap_unmap_ptes(last_pmap);
 			last_pmap = pv->pv_pmap;
 			ptes = pmap_map_ptes(last_pmap);
-			pmap_vac_me_user(last_pmap, pvh, ptes, 
+			pmap_vac_me_user(last_pmap, pg, ptes, 
 			    pmap_is_curpmap(last_pmap));
 		}
 		/* Restore the pte mapping that was passed to us.  */
@@ -2179,12 +2146,11 @@ pmap_vac_me_kpmap(struct pmap *pmap, str
 			return;
 	}
 
-	pmap_vac_me_user(pmap, pvh, ptes, clear_cache);
-	return;
+	pmap_vac_me_user(pmap, pg, ptes, clear_cache);
 }
 
 static void
-pmap_vac_me_user(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
+pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
 	boolean_t clear_cache)
 {
 	struct pmap *kpmap = pmap_kernel();
@@ -2194,8 +2160,9 @@ pmap_vac_me_user(struct pmap *pmap, stru
 	int cacheable_entries = 0;
 	int kern_cacheable = 0;
 	int other_writable = 0;
+	int prot;
 
-	pv = pvh->pvh_list;
+	pv = pg->mdpage.pvh_list;
 	KASSERT(ptes != NULL);
 
 	/*
@@ -2237,12 +2204,12 @@ pmap_vac_me_user(struct pmap *pmap, stru
 		if (cacheable_entries == 0)
 		    return;
 		for (npv = pv; npv; npv = npv->pv_next) {
-			if ((pmap == npv->pv_pmap 
-			    || kpmap == npv->pv_pmap) && 
+			if ((pmap == npv->pv_pmap ||
+			     kpmap == npv->pv_pmap) && 
 			    (npv->pv_flags & PT_NC) == 0) {
-				ptes[arm_byte_to_page(npv->pv_va)] &= 
-				    ~(PT_C | PT_B);
- 				npv->pv_flags |= PT_NC;
+				prot = (npv->pv_flags & PT_Wr) ?
+				    VM_PROT_READ | VM_PROT_WRITE :
+				    VM_PROT_READ;
 				/*
 				 * If this page needs flushing from the
 				 * cache, and we aren't going to do it
@@ -2256,6 +2223,11 @@ pmap_vac_me_user(struct pmap *pmap, stru
 					    NBPG);
 					cpu_tlb_flushID_SE(npv->pv_va);
 				}
+				ptes[arm_byte_to_page(npv->pv_va)] =
+				    ptes[arm_byte_to_page(npv->pv_va)] |
+				    pmap_pte_proto(npv->pv_pmap, prot,
+						   PTE_PROTO_NOCACHE);
+ 				npv->pv_flags |= PT_NC;
 			}
 		}
 		if ((clear_cache && cacheable_entries >= 4) ||
@@ -2273,8 +2245,13 @@ pmap_vac_me_user(struct pmap *pmap, stru
 			if ((pmap == npv->pv_pmap ||
 			    (kpmap == npv->pv_pmap && other_writable == 0)) && 
 			    (npv->pv_flags & PT_NC)) {
-				ptes[arm_byte_to_page(npv->pv_va)] |=
-				    pte_cache_mode;
+				prot = (npv->pv_flags & PT_Wr) ?
+				    VM_PROT_READ | VM_PROT_WRITE :
+				    VM_PROT_READ;
+				ptes[arm_byte_to_page(npv->pv_va)] =
+				    ptes[arm_byte_to_page(npv->pv_va)] |
+				    pmap_pte_proto(npv->pv_pmap, prot,
+						   PTE_PROTO_CACHE);
 				npv->pv_flags &= ~PT_NC;
 			}
 		}
@@ -2315,7 +2292,7 @@ pmap_remove(pmap, sva, eva)
 	pt_entry_t *pte = 0, *ptes;
 	paddr_t pa;
 	int pmap_active;
-	struct pv_head *pvh;
+	struct vm_page *pg;
 
 	/* Exit quick if there is no pmap */
 	if (!pmap)
@@ -2359,8 +2336,6 @@ pmap_remove(pmap, sva, eva)
 
 		/* We've found a valid PTE, so this page of PTEs has to go. */
 		if (pmap_pte_v(pte)) {
-			int bank, off;
-
 			/* Update statistics */
 			--pmap->pm_stats.resident_count;
 
@@ -2417,14 +2392,14 @@ pmap_remove(pmap, sva, eva)
 			 * we could cluster a lot of these and do a
 			 * number of sequential pages in one go.
 			 */
-			if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
+			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
 				struct pv_entry *pve;
-				pvh = &vm_physmem[bank].pmseg.pvhead[off];
-				simple_lock(&pvh->pvh_lock);
-				pve = pmap_remove_pv(pvh, pmap, sva);
+
+				simple_lock(&pg->mdpage.pvh_slock);
+				pve = pmap_remove_pv(pg, pmap, sva);
 				pmap_free_pv(pmap, pve);
-				pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
-				simple_unlock(&pvh->pvh_lock);
+				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
+				simple_unlock(&pg->mdpage.pvh_slock);
 			}
 		}
 		sva += NBPG;
@@ -2462,29 +2437,26 @@ pmap_remove(pmap, sva, eva)
  */
 
 static void
-pmap_remove_all(pa)
-	paddr_t pa;
+pmap_remove_all(pg)
+	struct vm_page *pg;
 {
 	struct pv_entry *pv, *npv;
-	struct pv_head *pvh;
 	struct pmap *pmap;
 	pt_entry_t *pte, *ptes;
 
-	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
+	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
 
 	/* set pv_head => pmap locking */
 	PMAP_HEAD_TO_MAP_LOCK();
 
-	pvh = pmap_find_pvh(pa);
-	simple_lock(&pvh->pvh_lock);
-	
-	pv = pvh->pvh_list;
-	if (pv == NULL)
-	{
-	    PDEBUG(0, printf("free page\n"));
-	    simple_unlock(&pvh->pvh_lock);
-	    PMAP_HEAD_TO_MAP_UNLOCK();
-	    return;
+	simple_lock(&pg->mdpage.pvh_slock);
+
+	pv = pg->mdpage.pvh_list;
+	if (pv == NULL) {
+		PDEBUG(0, printf("free page\n"));
+		simple_unlock(&pg->mdpage.pvh_slock);
+		PMAP_HEAD_TO_MAP_UNLOCK();
+		return;
 	}
 	pmap_clean_page(pv, FALSE);
 
@@ -2528,8 +2500,8 @@ reduce wiring count on page table pages 
 		pv = npv;
 		pmap_unmap_ptes(pmap);
 	}
-	pvh->pvh_list = NULL;
-	simple_unlock(&pvh->pvh_lock);
+	pg->mdpage.pvh_list = NULL;
+	simple_unlock(&pg->mdpage.pvh_slock);
 	PMAP_HEAD_TO_MAP_UNLOCK();	
 
 	PDEBUG(0, printf("done\n"));
@@ -2553,8 +2525,7 @@ pmap_protect(pmap, sva, eva, prot)
 	int armprot;
 	int flush = 0;
 	paddr_t pa;
-	int bank, off;
-	struct pv_head *pvh;
+	struct vm_page *pg;
 
 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
 	    pmap, sva, eva, prot));
@@ -2621,12 +2592,11 @@ pmap_protect(pmap, sva, eva, prot)
 		/* Get the physical page index */
 
 		/* Clear write flag */
-		if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
-			pvh = &vm_physmem[bank].pmseg.pvhead[off];
-			simple_lock(&pvh->pvh_lock);
-			(void) pmap_modify_pv(pmap, sva, pvh, PT_Wr, 0);
-			pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
-			simple_unlock(&pvh->pvh_lock);
+		if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
+			simple_lock(&pg->mdpage.pvh_slock);
+			(void) pmap_modify_pv(pmap, sva, pg, PT_Wr, 0);
+			pmap_vac_me_harder(pmap, pg, ptes, FALSE);
+			simple_unlock(&pg->mdpage.pvh_slock);
 		}
 
 next:
@@ -2665,12 +2635,11 @@ pmap_enter(pmap, va, pa, prot, flags)
 {
 	pt_entry_t *pte, *ptes;
 	u_int npte;
-	int bank, off;
 	paddr_t opa;
 	int nflags;
 	boolean_t wired = (flags & PMAP_WIRED) != 0;
 	struct pv_entry *pve;
-	struct pv_head	*pvh;
+	struct vm_page	*pg;
 	int error;
 
 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
@@ -2690,6 +2659,13 @@ pmap_enter(pmap, va, pa, prot, flags)
 			panic("pmap_enter: entering PT page");
 	}
 #endif
+
+	/*
+	 * Get pointer to the page.  Later on in this function, we
+	 * test for a managed page by checking pg != NULL.
+	 */
+	pg = PHYS_TO_VM_PAGE(pa);
+
 	/* get lock */
 	PMAP_MAP_TO_HEAD_LOCK();
 	/*
@@ -2746,14 +2722,11 @@ pmap_enter(pmap, va, pa, prot, flags)
 			    va, pa));
 
 			/* Has the wiring changed ? */
-			if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
-				pvh = &vm_physmem[bank].pmseg.pvhead[off];
-				simple_lock(&pvh->pvh_lock);
-				(void) pmap_modify_pv(pmap, va, pvh,
+			if (pg != NULL) {
+				simple_lock(&pg->mdpage.pvh_slock);
+				(void) pmap_modify_pv(pmap, va, pg,
 				    PT_Wr | PT_W, nflags);
-				simple_unlock(&pvh->pvh_lock);
- 			} else {
-				pvh = NULL;
+				simple_unlock(&pg->mdpage.pvh_slock);
 			}
 		} else {
 			/* We are replacing the page with a new one. */
@@ -2766,11 +2739,10 @@ pmap_enter(pmap, va, pa, prot, flags)
 			 * If it is part of our managed memory then we
 			 * must remove it from the PV list
 			 */
-			if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
-				pvh = &vm_physmem[bank].pmseg.pvhead[off];
-				simple_lock(&pvh->pvh_lock);
-				pve = pmap_remove_pv(pvh, pmap, va);
-				simple_unlock(&pvh->pvh_lock);
+			if (pg != NULL) {
+				simple_lock(&pg->mdpage.pvh_slock);
+				pve = pmap_remove_pv(pg, pmap, va);
+				simple_unlock(&pg->mdpage.pvh_slock);
 			} else {
 				pve = NULL;
 			}
@@ -2789,10 +2761,7 @@ pmap_enter(pmap, va, pa, prot, flags)
 		/*
 		 * Enter on the PV list if part of our managed memory
 		 */
-		bank = vm_physseg_find(atop(pa), &off);
-		
-		if (pmap_initialized && (bank != -1)) {
-			pvh = &vm_physmem[bank].pmseg.pvhead[off];
+		if (pmap_initialized && pg != NULL) {
 			if (pve == NULL) {
 				pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
 				if (pve == NULL) {
@@ -2804,11 +2773,9 @@ pmap_enter(pmap, va, pa, prot, flags)
 				}
 			}
 			/* enter_pv locks pvh when adding */
-			pmap_enter_pv(pvh, pve, pmap, va, NULL, nflags);
-		} else {
-			pvh = NULL;
-			if (pve != NULL)
-				pmap_free_pv(pmap, pve);
+			pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
+		} else if (pve != NULL) {
+			pmap_free_pv(pmap, pve);
 		}
 	}
 
@@ -2818,33 +2785,46 @@ pmap_enter(pmap, va, pa, prot, flags)
 #endif
 
 	/* Construct the pte, giving the correct access. */
-	npte = (pa & PG_FRAME);
+	KDASSERT((pa & PG_FRAME) == pa);
+	npte = pa;
 
-	/* VA 0 is magic. */
-	if (pmap != pmap_kernel() && va != 0)
-		npte |= PT_AP(AP_U);
-
-	if (pmap_initialized && bank != -1) {
+	/*
+	 * VA 0 is magic; that's where the vector page is.  User pmaps
+	 * always need an un-cached view of this page.  They get one
+	 * anyway, since the vector page is not in the managed page
+	 * pool, so there is no need to check for it here.
+	 */
+	if (pmap_initialized && pg != NULL) {
+		KDASSERT(va != 0);
 #ifdef DIAGNOSTIC
 		if ((flags & VM_PROT_ALL) & ~prot)
 			panic("pmap_enter: access_type exceeds prot");
 #endif
-		npte |= pte_cache_mode;
+		/*
+		 * XXXJRT -- consider optimization potential.
+		 * C.f. Alpha pmap.
+		 */
 		if (flags & VM_PROT_WRITE) {
-			npte |= L2_SPAGE | PT_AP(AP_W);
-			vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
+			npte |= pmap_pte_proto(pmap,
+					       VM_PROT_READ|VM_PROT_WRITE,
+					       PTE_PROTO_CACHE);
+			pg->mdpage.pvh_attrs |= PT_H | PT_M;
 		} else if (flags & VM_PROT_ALL) {
-			npte |= L2_SPAGE;
-			vm_physmem[bank].pmseg.attrs[off] |= PT_H;
-		} else
-			npte |= L2_INVAL;
+			npte |= pmap_pte_proto(pmap,
+					       VM_PROT_READ,
+					       PTE_PROTO_CACHE);
+			pg->mdpage.pvh_attrs |= PT_H;
+		}
+		/*
+		 * ...else we want to take a fault, so don't do anything
+		 * to the PTE here.
+		 */
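+		/*
+		 * The fault will later be resolved by
+		 * pmap_handled_emulation() or pmap_modified_emulation(),
+		 * which set the real protection and cache bits in the
+		 * PTE and record the referenced/modified attributes.
+		 */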
 	} else {
-		if (prot & VM_PROT_WRITE)
-			npte |= L2_SPAGE | PT_AP(AP_W);
-		else if (prot & VM_PROT_ALL)
-			npte |= L2_SPAGE;
-		else
-			npte |= L2_INVAL;
+		/*
+		 * Non-managed pages entered via this interface
+		 * are implicitly un-cached.
+		 */
+		npte |= pmap_pte_proto(pmap, prot, PTE_PROTO_NOCACHE);
 	}
 
 #ifdef MYCROFT_HACK
@@ -2854,19 +2834,19 @@ pmap_enter(pmap, va, pa, prot, flags)
 
 	*pte = npte;
 
-	if (pmap_initialized && bank != -1)
-	{
+	if (pmap_initialized && pg != NULL) {
 		boolean_t pmap_active = FALSE;
-		/* XXX this will change once the whole of pmap_enter uses
+		/*
+		 * XXX this will change once the whole of pmap_enter uses
 		 * map_ptes
 		 */
 		ptes = pmap_map_ptes(pmap);
 		if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
 		    || (pmap == pmap_kernel()))
 			pmap_active = TRUE;
-		simple_lock(&pvh->pvh_lock);
- 		pmap_vac_me_harder(pmap, pvh, ptes, pmap_active);
-		simple_unlock(&pvh->pvh_lock);
+		simple_lock(&pg->mdpage.pvh_slock);
+ 		pmap_vac_me_harder(pmap, pg, ptes, pmap_active);
+		simple_unlock(&pg->mdpage.pvh_slock);
 		pmap_unmap_ptes(pmap);
 	}
 
@@ -2905,7 +2885,7 @@ pmap_kenter_pa(va, pa, prot)
 		 */
 
 	    	/* must lock the pmap */
-	    	simple_lock(&(pmap_kernel()->pm_obj.vmobjlock));
+	    	simple_lock(&pmap->pm_obj.vmobjlock);
 		/* Allocate a page table */
 		pg = uvm_pagealloc(&(pmap_kernel()->pm_obj), 0, NULL,
 		    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
@@ -2916,11 +2896,18 @@ pmap_kenter_pa(va, pa, prot)
 
 		/* Wire this page table into the L1. */
 		pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(pg), TRUE);
-		simple_unlock(&(pmap_kernel()->pm_obj.vmobjlock));
+		simple_unlock(&pmap->pm_obj.vmobjlock);
 	}
 	pte = vtopte(va);
 	KASSERT(!pmap_pte_v(pte));
-	*pte = L2_PTE(pa, AP_KRW);
+#if 1 /* XXX */
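+	/*
+	 * XXX Always enter a cacheable R/W mapping here for now;
+	 * callers that need an un-cached mapping (e.g. the bus_space
+	 * map code) currently fix up the PTE themselves.
+	 */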
+	*pte = pa | pmap_pte_proto(pmap_kernel(),
+				   VM_PROT_READ|VM_PROT_WRITE,
+				   PTE_PROTO_CACHE);
+#else
+	*pte = pa | pmap_pte_proto(pmap_kernel(), prot,
+				   PTE_PROTO_CACHE);
+#endif
 }
 
 void
@@ -2956,9 +2943,9 @@ pmap_page_protect(pg, prot)
 	struct vm_page *pg;
 	vm_prot_t prot;
 {
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 
-	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
+	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
+	    VM_PAGE_TO_PHYS(pg), prot));
 
 	switch(prot) {
 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
@@ -2967,11 +2954,11 @@ pmap_page_protect(pg, prot)
 
 	case VM_PROT_READ:
 	case VM_PROT_READ|VM_PROT_EXECUTE:
-		pmap_copy_on_write(pa);
+		pmap_copy_on_write(pg);
 		break;
 
 	default:
-		pmap_remove_all(pa);
+		pmap_remove_all(pg);
 		break;
 	}
 }
@@ -2992,8 +2979,7 @@ pmap_unwire(pmap, va)
 {
 	pt_entry_t *pte;
 	paddr_t pa;
-	int bank, off;
-	struct pv_head *pvh;
+	struct vm_page *pg;
 
 	/*
 	 * Make sure pmap is valid. -dct
@@ -3009,13 +2995,13 @@ pmap_unwire(pmap, va)
 	/* Extract the physical address of the page */
 	pa = pmap_pte_pa(pte);
 
-	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
+	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
 		return;
-	pvh = &vm_physmem[bank].pmseg.pvhead[off];
-	simple_lock(&pvh->pvh_lock);
+
 	/* Update the wired bit in the pv entry for this page. */
-	(void) pmap_modify_pv(pmap, va, pvh, PT_W, 0);
-	simple_unlock(&pvh->pvh_lock);
+	simple_lock(&pg->mdpage.pvh_slock);
+	(void) pmap_modify_pv(pmap, va, pg, PT_W, 0);
+	simple_unlock(&pg->mdpage.pvh_slock);
 }
 
 /*
@@ -3200,54 +3186,44 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, 
 
 #if defined(PMAP_DEBUG)
 void
-pmap_dump_pvlist(phys, m)
-	vaddr_t phys;
+pmap_dump_pvlist(pg, m)
+	struct vm_page *pg;
 	char *m;
 {
-	struct pv_head *pvh;
 	struct pv_entry *pv;
-	int bank, off;
 
-	if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
-		printf("INVALID PA\n");
-		return;
-	}
-	pvh = &vm_physmem[bank].pmseg.pvhead[off];
-	simple_lock(&pvh->pvh_lock);
-	printf("%s %08lx:", m, phys);
-	if (pvh->pvh_list == NULL) {
+	simple_lock(&pg->mdpage.pvh_slock);
+	printf("%s %08lx:", m, VM_PAGE_TO_PHYS(pg));
+	if (pg->mdpage.pvh_list == NULL) {
 		printf(" no mappings\n");
+		simple_unlock(&pg->mdpage.pvh_slock);
 		return;
 	}
 
-	for (pv = pvh->pvh_list; pv; pv = pv->pv_next)
+	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
 		    pv->pv_va, pv->pv_flags);
 
 	printf("\n");
-	simple_unlock(&pvh->pvh_lock);
+	simple_unlock(&pg->mdpage.pvh_slock);
 }
 
 #endif	/* PMAP_DEBUG */
 
 __inline static boolean_t
-pmap_testbit(pa, setbits)
-	paddr_t pa;
+pmap_testbit(pg, setbits)
+	struct vm_page *pg;
 	unsigned int setbits;
 {
-	int bank, off;
-
-	PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
 
-	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
-		return(FALSE);
+	PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n",
+	    VM_PAGE_TO_PHYS(pg), setbits));
 
 	/*
 	 * Check saved info only
 	 */
-	if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
+	if (pg->mdpage.pvh_attrs & setbits) {
 		PDEBUG(0, printf("pmap_attributes = %02x\n",
-		    vm_physmem[bank].pmseg.attrs[off]));
+		    pg->mdpage.pvh_attrs));
 		return(TRUE);
 	}
 
@@ -3316,34 +3292,30 @@ pmap_unmap_ptes(pmap)
  */
 
 static void
-pmap_clearbit(pa, maskbits)
-	paddr_t pa;
+pmap_clearbit(pg, maskbits)
+	struct vm_page *pg;
 	unsigned int maskbits;
 {
 	struct pv_entry *pv;
-	struct pv_head *pvh;
 	pt_entry_t *pte;
 	vaddr_t va;
-	int bank, off, tlbentry;
+	int tlbentry;
 
 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
-	    pa, maskbits));
+	    VM_PAGE_TO_PHYS(pg), maskbits));
 
 	tlbentry = 0;
 	
-	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
-		return;
 	PMAP_HEAD_TO_MAP_LOCK();
-	pvh = &vm_physmem[bank].pmseg.pvhead[off];
-	simple_lock(&pvh->pvh_lock);
+	simple_lock(&pg->mdpage.pvh_slock);
 	
 	/*
 	 * Clear saved attributes (modify, reference)
 	 */
-	vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
+	pg->mdpage.pvh_attrs &= ~maskbits;
 
-	if (pvh->pvh_list == NULL) {
-		simple_unlock(&pvh->pvh_lock);
+	if (pg->mdpage.pvh_list == NULL) {
+		simple_unlock(&pg->mdpage.pvh_slock);
 		PMAP_HEAD_TO_MAP_UNLOCK();
 		return;
 	}
@@ -3351,13 +3323,13 @@ pmap_clearbit(pa, maskbits)
 	/*
 	 * Loop over all current mappings setting/clearing as appropos
 	 */
-	for (pv = pvh->pvh_list; pv; pv = pv->pv_next) {
+	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
 		va = pv->pv_va;
 		pv->pv_flags &= ~maskbits;
 		pte = pmap_pte(pv->pv_pmap, va);
 		KASSERT(pte != NULL);
 		if (maskbits & (PT_Wr|PT_M)) {
-			if ((pv->pv_flags & PT_NC)) {
+			if (pv->pv_flags & PT_NC) {
 				/* 
 				 * Entry is not cacheable: reenable
 				 * the cache, nothing to flush
@@ -3375,36 +3347,56 @@ pmap_clearbit(pa, maskbits)
 				 *
 				 */
 				if (maskbits & PT_Wr) {
-					*pte |= pte_cache_mode;
+					/*
+					 * Clear the NC bit in the pv
+					 * entry; we'll update the PTE
+					 * below.
+					 */
 					pv->pv_flags &= ~PT_NC;
 				}
-			} else if (pmap_is_curpmap(pv->pv_pmap))
-				/* 
+			} else if (pmap_is_curpmap(pv->pv_pmap)) {
+				/*
 				 * Entry is cacheable: check if pmap is
-				 * current if it is flush it,
-				 * otherwise it won't be in the cache
+				 * current, and if it is, flush it,
+				 * otherwise it won't be in the cache.
 				 */
 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
+			}
 
-			/* make the pte read only */
-			*pte &= ~PT_AP(AP_W);
+			/* Make the PTE read-only. */
+			*pte = (*pte & PG_FRAME) |
+			    pmap_pte_proto(pv->pv_pmap, VM_PROT_READ,
+					   (pv->pv_flags & PT_NC) ?
+					   PTE_PROTO_NOCACHE :
+					   PTE_PROTO_CACHE);
 		}
 
-		if (maskbits & PT_H)
-			*pte = (*pte & ~L2_MASK) | L2_INVAL;
+		if (maskbits & PT_H) {
+			/*
+			 * We are going to revoke the mapping for this
+			 * page.  If it is writable, make sure to flush
+			 * it from the cache.
+			 *
+			 * XXXJRT This flush might be redundant!
+			 */
+			if ((pv->pv_flags & PT_Wr) != 0 &&
+			    pmap_is_curpmap(pv->pv_pmap))
+				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
 
-		if (pmap_is_curpmap(pv->pv_pmap))
+			*pte = *pte & PG_FRAME;
+		}
+
+		if (pmap_is_curpmap(pv->pv_pmap)) {
 			/* 
-			 * if we had cacheable pte's we'd clean the
-			 * pte out to memory here
-			 *
-			 * flush tlb entry as it's in the current pmap
+			 * The PTE has been modified, and it's in the
+			 * current pmap, so invalidate the TLB entry.
 			 */
 			cpu_tlb_flushID_SE(pv->pv_va); 
+		}
 	}
 	cpu_cpwait();
 
-	simple_unlock(&pvh->pvh_lock);
+	simple_unlock(&pg->mdpage.pvh_slock);
 	PMAP_HEAD_TO_MAP_UNLOCK();
 }
 
@@ -3413,12 +3405,11 @@ boolean_t
 pmap_clear_modify(pg)
 	struct vm_page *pg;
 {
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	boolean_t rv;
 
-	PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
-	rv = pmap_testbit(pa, PT_M);
-	pmap_clearbit(pa, PT_M);
+	PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", VM_PAGE_TO_PHYS(pg)));
+	rv = pmap_testbit(pg, PT_M);
+	pmap_clearbit(pg, PT_M);
 	return rv;
 }
 
@@ -3427,22 +3418,23 @@ boolean_t
 pmap_clear_reference(pg)
 	struct vm_page *pg;
 {
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	boolean_t rv;
 
-	PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
-	rv = pmap_testbit(pa, PT_H);
-	pmap_clearbit(pa, PT_H);
+	PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n",
+	    VM_PAGE_TO_PHYS(pg)));
+	rv = pmap_testbit(pg, PT_H);
+	pmap_clearbit(pg, PT_H);
 	return rv;
 }
 
 
 void
-pmap_copy_on_write(pa)
-	paddr_t pa;
+pmap_copy_on_write(pg)
+	struct vm_page *pg;
 {
-	PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
-	pmap_clearbit(pa, PT_Wr);
+	PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n",
+	    VM_PAGE_TO_PHYS(pg)));
+	pmap_clearbit(pg, PT_Wr);
 }
 
 
@@ -3450,11 +3442,11 @@ boolean_t
 pmap_is_modified(pg)
 	struct vm_page *pg;
 {
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	boolean_t result;
     
-	result = pmap_testbit(pa, PT_M);
-	PDEBUG(1, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
+	result = pmap_testbit(pg, PT_M);
+	PDEBUG(1, printf("pmap_is_modified pa=%08lx %x\n",
+	    VM_PAGE_TO_PHYS(pg), result));
 	return (result);
 }
 
@@ -3463,11 +3455,11 @@ boolean_t
 pmap_is_referenced(pg)
 	struct vm_page *pg;
 {
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	boolean_t result;
 	
-	result = pmap_testbit(pa, PT_H);
-	PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
+	result = pmap_testbit(pg, PT_H);
+	PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n",
+	    VM_PAGE_TO_PHYS(pg), result));
 	return (result);
 }
 
@@ -3479,8 +3471,7 @@ pmap_modified_emulation(pmap, va)
 {
 	pt_entry_t *pte;
 	paddr_t pa;
-	int bank, off;
-	struct pv_head *pvh;
+	struct vm_page *pg;
 	u_int flags;
 
 	PDEBUG(2, printf("pmap_modified_emulation\n"));
@@ -3499,21 +3490,19 @@ pmap_modified_emulation(pmap, va)
 		return(0);
 
 	/* This can happen if user code tries to access kernel memory. */
+	/* XXXJRT Use address-based check.  C.f. Alpha pmap. */
 	if ((*pte & PT_AP(AP_W)) != 0)
 		return (0);
 
 	/* Extract the physical address of the page */
 	pa = pmap_pte_pa(pte);
-	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
+	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
 		return(0);
 
 	PMAP_HEAD_TO_MAP_LOCK();
-	/* Get the current flags for this page. */
-	pvh = &vm_physmem[bank].pmseg.pvhead[off];
-	/* XXX: needed if we hold head->map lock? */
-	simple_lock(&pvh->pvh_lock);
+	simple_lock(&pg->mdpage.pvh_slock);
 	
-	flags = pmap_modify_pv(pmap, va, pvh, 0, 0);
+	flags = pmap_modify_pv(pmap, va, pg, 0, 0);
 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
 
 	/*
@@ -3524,14 +3513,14 @@ pmap_modified_emulation(pmap, va)
 	 * modified bit
 	 */
 	if (~flags & PT_Wr) {
-	    	simple_unlock(&pvh->pvh_lock);
+	    	simple_unlock(&pg->mdpage.pvh_slock);
 		PMAP_HEAD_TO_MAP_UNLOCK();
 		return(0);
 	}
 
 	PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
 	    va, pte, *pte));
-	vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
+	pg->mdpage.pvh_attrs |= PT_H | PT_M;
 
 	/* 
 	 * Re-enable write permissions for the page.  No need to call
@@ -3540,14 +3529,18 @@ pmap_modified_emulation(pmap, va)
 	 * already set the cacheable bits based on the assumption that we
 	 * can write to this page.
 	 */
-	*pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
+	*pte = (*pte & PG_FRAME) |
+	    pmap_pte_proto(pmap, VM_PROT_READ|VM_PROT_WRITE,
+			   (flags & PT_NC) ? PTE_PROTO_NOCACHE
+					   : PTE_PROTO_CACHE);
 	PDEBUG(0, printf("->(%08x)\n", *pte));
 
-	simple_unlock(&pvh->pvh_lock);
+	simple_unlock(&pg->mdpage.pvh_slock);
 	PMAP_HEAD_TO_MAP_UNLOCK();
-	/* Return, indicating the problem has been dealt with */
+
 	cpu_tlb_flushID_SE(va);
 	cpu_cpwait();
+
 	return(1);
 }
 
@@ -3558,8 +3551,9 @@ pmap_handled_emulation(pmap, va)
 	vaddr_t va;
 {
 	pt_entry_t *pte;
+	struct vm_page *pg;
 	paddr_t pa;
-	int bank, off;
+	int flags;
 
 	PDEBUG(2, printf("pmap_handled_emulation\n"));
 
@@ -3582,27 +3576,43 @@ pmap_handled_emulation(pmap, va)
 
 	/* Extract the physical address of the page */
 	pa = pmap_pte_pa(pte);
-	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
-		return(0);
 
+	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
+		return (0);
+
+	PMAP_HEAD_TO_MAP_LOCK();
+	simple_lock(&pg->mdpage.pvh_slock);
+
 	/*
-	 * Ok we just enable the pte and mark the attibs as handled
+	 * XXXJRT Get the cacheable/non-cacheable state for this
+	 * XXXJRT mapping.  This should die, in favor of stuffing
+	 * XXXJRT these bits into the vm_page.
 	 */
+	flags = pmap_modify_pv(pmap, va, pg, 0, 0);
+
+	/*
+	 * Ok we just enable the pte and mark the attribs as handled
+	 */
 	PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
 	    va, pte, *pte));
-	vm_physmem[bank].pmseg.attrs[off] |= PT_H;
-	*pte = (*pte & ~L2_MASK) | L2_SPAGE;
+	pg->mdpage.pvh_attrs |= PT_H;
+	*pte = (*pte & PG_FRAME) | pmap_pte_proto(pmap,
+						  VM_PROT_READ,
+						  (flags & PT_NC) ?
+						  PTE_PROTO_NOCACHE :
+						  PTE_PROTO_CACHE);
 	PDEBUG(0, printf("->(%08x)\n", *pte));
+
+	simple_unlock(&pg->mdpage.pvh_slock);
+	PMAP_HEAD_TO_MAP_UNLOCK();
 
-	/* Return, indicating the problem has been dealt with */
 	cpu_tlb_flushID_SE(va);
 	cpu_cpwait();
+
 	return(1);
 }
 
 
-
-
 /*
  * pmap_collect: free resources held by a pmap
  *
@@ -3719,5 +3729,411 @@ pmap_alloc_ptp(struct pmap *pmap, vaddr_
 //	pmap->pm_ptphint = ptp;
 	return (ptp);
 }
+
+/************************ Bootstrapping routines ****************************/
+
+/*
+ * pmap_map_section:
+ *
+ *	Create a single section mapping.
+ */
+void
+pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
+{
+	pd_entry_t *pde = (pd_entry_t *) l1pt;
+
+	KASSERT(((va | pa) & (L1_SEC_SIZE - 1)) == 0);
+
+	pde[va >> PDSHIFT] = pa | l1sec_proto(prot, cache);
+}
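+
+/*
+ * For example (hypothetical addresses), a bootstrap mapping of a
+ * 1MB device region, un-cached, would look like:
+ *
+ *	pmap_map_section(l1pt, DEV_VBASE, DEV_PBASE,
+ *	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+ *
+ * Both addresses must be section-aligned; the KASSERT above
+ * enforces this.
+ */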
 
-/* End of pmap.c */
+/*
+ * pmap_map_entry:
+ *
+ *	Create a single page mapping.
+ */
+void
+pmap_map_entry(vaddr_t l2pt, vaddr_t va, paddr_t pa, int prot, int cache)
+{
+	pt_entry_t *pte = (pt_entry_t *) l2pt;
+
+#ifndef cats
+	pte[(va >> PGSHIFT) & 0x3ff] =
+	    pa | pte_proto(PTE_PROTO_KERNEL, prot, cache);
+#else
+	pte[(va >> PGSHIFT) & 0x7ff] =
+	    pa | pte_proto(PTE_PROTO_KERNEL, prot, cache);
+#endif /* cats */
+}
+
+/*
+ * pmap_map_l2pt:
+ *
+ *	Map the L2 page table at the specified physical address
+ *	into the L1 slots corresponding to the specified virtual
+ *	address.
+ */
+void
+pmap_map_l2pt(vaddr_t l1pt, vaddr_t va, paddr_t pa)
+{
+	pd_entry_t *pde = (pd_entry_t *) l1pt;
+
+	KASSERT((pa & PG_FRAME) == pa);
+
+	pde[(va >> PDSHIFT) + 0] = (pa + 0x000) | pde_proto;
+	pde[(va >> PDSHIFT) + 1] = (pa + 0x400) | pde_proto;
+	pde[(va >> PDSHIFT) + 2] = (pa + 0x800) | pde_proto;
+	pde[(va >> PDSHIFT) + 3] = (pa + 0xc00) | pde_proto;
+}
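+
+/*
+ * Each L1 descriptor covers 1MB of VA and points at a 1KB coarse
+ * L2 table (256 entries of 4KB each).  A full 4KB page of L2 table
+ * therefore spans four consecutive L1 slots (4MB of VA), which is
+ * why four descriptors are written above.
+ */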
+
+/*
+ * pmap_map_chunk:
+ *
+ *	Map a chunk of memory using the most efficient mappings
+ *	possible (section, large page, small page) into the
+ *	provided L1 and L2 tables at the specified virtual address.
+ */
+vsize_t
+pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
+    vsize_t size, int prot, int cache)
+{
+	pd_entry_t *pde = (pd_entry_t *) l1pt;
+	pt_entry_t *pte = (pt_entry_t *) l2pt;
+	vsize_t resid;
+	int i;
+
+	resid = (size + (NBPG - 1)) & ~(NBPG - 1);
+
+#ifdef VERBOSE_INIT_ARM
+	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
+	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
+#endif
+
+	size = resid;
+
+	while (resid > 0) {
+		/* See if we can use a section mapping. */
+		if (l1pt &&
+		    ((pa | va) & (L1_SEC_SIZE - 1)) == 0 &&
+		    resid >= L1_SEC_SIZE) {
+#ifdef VERBOSE_INIT_ARM
+			printf("S");
+#endif
+			pde[va >> PDSHIFT] = pa | l1sec_proto(prot, cache);
+			va += L1_SEC_SIZE;
+			pa += L1_SEC_SIZE;
+			resid -= L1_SEC_SIZE;
+			continue;
+		}
+
+		/* See if we can use a L2 large page mapping. */
+		if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
+		    resid >= L2_LPAGE_SIZE) {
+#ifdef VERBOSE_INIT_ARM
+			printf("L");
+#endif
+			for (i = 0; i < 16; i++) {
+#ifndef cats /* XXXJRT */
+				pte[((va >> PGSHIFT) & 0x3f0) + i] = pa |
+				    lpte_proto(prot, cache);
+#else
+				pte[((va >> PGSHIFT) & 0x7f0) + i] = pa |
+				    lpte_proto(prot, cache);
+#endif /* cats */
+			}
+			va += L2_LPAGE_SIZE;
+			pa += L2_LPAGE_SIZE;
+			resid -= L2_LPAGE_SIZE;
+			continue;
+		}
+
+		/* Use a small page mapping. */
+#ifdef VERBOSE_INIT_ARM
+		printf("P");
+#endif
+#ifndef cats /* XXXJRT */
+		pte[(va >> PGSHIFT) & 0x3ff] = pa |
+		    pte_proto(PTE_PROTO_KERNEL, prot, cache);
+#else
+		pte[(va >> PGSHIFT) & 0x7ff] = pa |
+		    pte_proto(PTE_PROTO_KERNEL, prot, cache);
+#endif /* cats */
+		va += NBPG;
+		pa += NBPG;
+		resid -= NBPG;
+	}
+#ifdef VERBOSE_INIT_ARM
+	printf("\n");
+#endif
+	return (size);
+}
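+
+/*
+ * For example (hypothetical values): given a valid l1pt and "va"/"pa"
+ * both 1MB-aligned, a size of 5MB + 80KB maps as five sections, one
+ * 64KB large page, and four small pages -- printing "SSSSSLPPPP"
+ * under VERBOSE_INIT_ARM.
+ */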
+
+/*
+ * pmap_pte_protos_init:
+ *
+ *	Initialize the prototype PTE arrays.  This is done very
+ *	early, right after the cpufunc vector is selected.
+ */
+#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
+    defined(CPU_ARM8) || defined(CPU_SA110)
+void
+pmap_pte_protos_init_arm678(void)
+{
+	int prot;
+
+	/*
+	 * NOTE: For all ARM6, ARM7, and ARM8 CPUs, bit 4 (the
+	 * implementation defined bit) of L1 descriptors should
+	 * be set to 1.
+	 */
+
+	pde_proto = L1_PAGE | PT_U;
+
+#define	CACHE	(PT_B|PT_C)
+
+	for (prot = 0; prot < 8; prot++) {
+		if (prot & VM_PROT_WRITE) {
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRW) | CACHE;
+
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRW);
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE;
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWURW);
+
+			l1sec_proto(prot, PTE_PROTO_CACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U | CACHE;
+
+			l1sec_proto(prot, PTE_PROTO_NOCACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U;
+
+			lpte_proto(prot, PTE_PROTO_CACHE) =
+			    L2_LPAGE | PT_AP(AP_KRW) | CACHE;
+
+			lpte_proto(prot, PTE_PROTO_NOCACHE) =
+			    L2_LPAGE | PT_AP(AP_KRW);
+		} else if (prot & VM_PROT_ALL) {
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KR) | CACHE;
+
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KR);
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE;
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWUR);
+
+			l1sec_proto(prot, PTE_PROTO_CACHE) =
+			    (AP_KR << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U | CACHE;
+
+			l1sec_proto(prot, PTE_PROTO_NOCACHE) =
+			    (AP_KR << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U;
+
+			lpte_proto(prot, PTE_PROTO_CACHE) =
+			    L2_LPAGE | PT_AP(AP_KR) | CACHE;
+
+			lpte_proto(prot, PTE_PROTO_NOCACHE) =
+			    L2_LPAGE | PT_AP(AP_KR);
+		}
+	}
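+
+	/*
+	 * Note: entries for VM_PROT_NONE are deliberately left zero;
+	 * an all-zero descriptor is invalid on ARM, so such mappings
+	 * simply fault.
+	 */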
+#undef CACHE
+}
+#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
+
+#if defined(CPU_ARM9)
+void
+pmap_pte_protos_init_arm9(void)
+{
+	int prot;
+
+	/*
+	 * NOTE: For all ARM9 CPUs, bit 4 (the implementation defined
+	 * bit) of L1 descriptors should be set to 1.
+	 */
+
+	pde_proto = L1_PAGE | PT_U;
+
+/* Use the cache in write-through mode for now. */
+#define	CACHE	(PT_C)
+
+	for (prot = 0; prot < 8; prot++) {
+		if (prot & VM_PROT_WRITE) {
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRW) | CACHE;
+
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRW);
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE;
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWURW);
+
+			l1sec_proto(prot, PTE_PROTO_CACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U | CACHE;
+
+			l1sec_proto(prot, PTE_PROTO_NOCACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U;
+
+			lpte_proto(prot, PTE_PROTO_CACHE) =
+			    L2_LPAGE | PT_AP(AP_KRW) | CACHE;
+
+			lpte_proto(prot, PTE_PROTO_NOCACHE) =
+			    L2_LPAGE | PT_AP(AP_KRW);
+		} else if (prot & VM_PROT_ALL) {
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KR) | CACHE;
+
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KR);
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE;
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWUR);
+
+			l1sec_proto(prot, PTE_PROTO_CACHE) =
+			    (AP_KR << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U | CACHE;
+
+			l1sec_proto(prot, PTE_PROTO_NOCACHE) =
+			    (AP_KR << AP_SECTION_SHIFT) |
+			    L1_SECTION | PT_U;
+
+			lpte_proto(prot, PTE_PROTO_CACHE) =
+			    L2_LPAGE | PT_AP(AP_KR) | CACHE;
+
+			lpte_proto(prot, PTE_PROTO_NOCACHE) =
+			    L2_LPAGE | PT_AP(AP_KR);
+		}
+	}
+#undef CACHE
+}
+#endif /* CPU_ARM9 */
+
+#if defined(CPU_XSCALE)
+void
+pmap_pte_protos_init_xscale(void)
+{
+	int prot;
+
+/*
+ * i80200 errata item #40: Store to cacheable memory,
+ * interrupted by an exception, may inadvertently
+ * write to memory.
+ *
+ * This can have an adverse effect on copy-on-write
+ * operations.
+ *
+ * Work-around: Non-writable mappings should have
+ * a cache mode of write-through (this avoids the
+ * problem).  This has no adverse performance effect,
+ * since the mappings are read-only.
+ */
+#define	CACHE_WT	(PT_C)
+#define	CACHE_WB	(PT_C)		/* XXX for now */
+
+	/*
+	 * NOTE: For all XScale CPUs, bit 4 (the implementation defined
+	 * bit) of L1 descriptors should be set to 0.
+	 */
+
+	pde_proto = L1_PAGE;
+
+	for (prot = 0; prot < 8; prot++) {
+		if (prot & VM_PROT_WRITE) {
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRW) | CACHE_WB;
+
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRW);
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWURW) | CACHE_WB;
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWURW);
+
+			l1sec_proto(prot, PTE_PROTO_CACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION | CACHE_WB;
+
+			l1sec_proto(prot, PTE_PROTO_NOCACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION;
+
+			lpte_proto(prot, PTE_PROTO_CACHE) =
+			    L2_LPAGE | PT_AP(AP_KRW) | CACHE_WB;
+
+			lpte_proto(prot, PTE_PROTO_NOCACHE) =
+			    L2_LPAGE | PT_AP(AP_KRW);
+		} else if (prot & VM_PROT_ALL) {
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KR) | CACHE_WT;
+
+			pte_proto(PTE_PROTO_KERNEL, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KR);
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_CACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWUR) | CACHE_WT;
+
+			pte_proto(PTE_PROTO_USER, prot,
+				  PTE_PROTO_NOCACHE) =
+			    L2_SPAGE | PT_AP(AP_KRWUR);
+
+			l1sec_proto(prot, PTE_PROTO_CACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION | CACHE_WT;
+
+			l1sec_proto(prot, PTE_PROTO_NOCACHE) =
+			    (AP_KRW << AP_SECTION_SHIFT) |
+			    L1_SECTION;
+
+			lpte_proto(prot, PTE_PROTO_CACHE) =
+			    L2_LPAGE | PT_AP(AP_KR) | CACHE_WT;
+
+			lpte_proto(prot, PTE_PROTO_NOCACHE) =
+			    L2_LPAGE | PT_AP(AP_KR);
+		}
+	}
+#undef CACHE_WT
+#undef CACHE_WB
+}
+#endif /* CPU_XSCALE */
Index: arm/footbridge/footbridge_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/footbridge/footbridge_machdep.c,v
retrieving revision 1.5
diff -u -p -r1.5 footbridge_machdep.c
--- arm/footbridge/footbridge_machdep.c	2002/01/05 22:41:48	1.5
+++ arm/footbridge/footbridge_machdep.c	2002/02/02 00:16:04
@@ -77,7 +77,9 @@ footbridge_sa110_cc_setup(void)
 
 	for (loop = 0; loop < cleanarea; loop += NBPG) {
 		pte = pmap_pte(pmap_kernel(), (addr + loop));
-		*pte = L2_PTE(DC21285_SA_CACHE_FLUSH_BASE + loop, AP_KR);
+		*pte = (DC21285_SA_CACHE_FLUSH_BASE + loop) |
+		    pte_proto(PTE_PROTO_KERNEL, VM_PROT_READ,
+		    PTE_PROTO_CACHE);
 	}
 	sa110_cache_clean_addr = addr;
 	sa110_cache_clean_size = cleanarea / 2;
Index: arm/include/arm32/machdep.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/machdep.h,v
retrieving revision 1.3
diff -u -p -r1.3 machdep.h
--- arm/include/arm32/machdep.h	2002/01/20 03:41:48	1.3
+++ arm/include/arm32/machdep.h	2002/02/02 00:16:05
@@ -11,14 +11,6 @@ void prefetch_abort_handler __P((trapfra
 void undefinedinstruction_bounce __P((trapframe_t *));
 void dumpsys	__P((void));
 
-void	map_section(vaddr_t, vaddr_t, paddr_t, int);
-void	map_pagetable(vaddr_t, vaddr_t, paddr_t);
-void	map_entry(vaddr_t, vaddr_t, paddr_t);
-void	map_entry_nc(vaddr_t, vaddr_t, paddr_t);
-void	map_entry_ro(vaddr_t, vaddr_t, paddr_t); 
-vsize_t map_chunk(vaddr_t, vaddr_t, vaddr_t, paddr_t, vsize_t,
-	    u_int, u_int);
-
 /* 
  * note that we use void * as all the platforms have different ideas on what
  * the structure is
Index: arm/include/arm32/pmap.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/pmap.h,v
retrieving revision 1.20
diff -u -p -r1.20 pmap.h
--- arm/include/arm32/pmap.h	2002/01/19 16:55:22	1.20
+++ arm/include/arm32/pmap.h	2002/02/02 00:16:08
@@ -99,24 +99,6 @@ struct pmap {
 typedef struct pmap *pmap_t;
 
 /*
- * for each managed physical page we maintain a list of <PMAP,VA>'s
- * which it is mapped at.  the list is headed by a pv_head structure.
- * there is one pv_head per managed phys page (allocated at boot time).
- * the pv_head structure points to a list of pv_entry structures (each
- * describes one mapping).
- *
- * pv_entry's are only visible within pmap.c, so only provide a placeholder
- * here
- */
-
-struct pv_entry;
-
-struct pv_head {
-	struct simplelock pvh_lock;	/* locks every pv on this list */
-	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
-};
-
-/*
  * Page hooks. I'll eliminate these sometime soon :-)
  *
  * For speed we store the both the virtual address and the page table
@@ -138,10 +120,35 @@ typedef struct {
 } pv_addr_t;
 
 /*
- * _KERNEL specific macros, functions and prototypes
+ * Prototype PTE bits for each VM protection code, both cached
+ * and un-cached, kernel and userland.
  */
-
-#ifdef  _KERNEL
+extern pt_entry_t pte_protos[4][8];
+extern pd_entry_t l1sec_protos[2][8];
+extern pt_entry_t lpte_protos[2][8];
+extern pd_entry_t pde_proto;
+
+#define	PTE_PROTO_KERNEL	0
+#define	PTE_PROTO_USER		1
+#define	PTE_PROTO_NOCACHE	0
+#define	PTE_PROTO_CACHE		1
+
+#define	pte_proto(ku, prot, cache)					\
+	pte_protos[(ku) + ((cache) << 1)][(prot)]
+
+#define	l1sec_proto(prot, cache)					\
+	l1sec_protos[(cache)][(prot)]
+
+#define	lpte_proto(prot, cache)						\
+	lpte_protos[(cache)][(prot)]
+
+#define	pmap_pte_proto(pm, prot, cache)					\
+	pte_proto((pm == pmap_kernel()) ? PTE_PROTO_KERNEL		\
+					: PTE_PROTO_USER, (prot), (cache))
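+
+/*
+ * For example, a cacheable kernel read/write PTE is built as:
+ *
+ *	npte = pa | pte_proto(PTE_PROTO_KERNEL,
+ *	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+ *
+ * The first array index packs kernel/user and cache/no-cache into
+ * the range 0-3; the second is the VM protection code (0-7).
+ */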
+
+void	pmap_pte_protos_init_arm678(void);
+void	pmap_pte_protos_init_arm9(void);
+void	pmap_pte_protos_init_xscale(void);
 
 /*
  * Commonly referenced structures
@@ -176,13 +183,17 @@ int pmap_modified_emulation __P((struct 
 void pmap_postinit __P((void));
 pt_entry_t *pmap_pte __P((struct pmap *, vaddr_t));
 
+/* Bootstrapping routines. */
+void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
+void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
+void	pmap_map_l2pt(vaddr_t, vaddr_t, paddr_t);
+vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
+
 /*
  * Special page zero routine for use by the idle loop (no cache cleans). 
  */
 boolean_t	pmap_pageidlezero __P((paddr_t));
 #define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
-
-#endif	/* _KERNEL */
 
 /*
  * Useful macros and constants 
Index: arm/include/arm32/pte.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/pte.h,v
retrieving revision 1.1
diff -u -p -r1.1 pte.h
--- arm/include/arm32/pte.h	2001/11/23 17:39:04	1.1
+++ arm/include/arm32/pte.h	2002/02/02 00:16:09
@@ -74,12 +74,6 @@ typedef	int	pt_entry_t;		/* page table e
 #define PT_C		0x08	/* Phys - Cacheable */
 #define PT_U		0x10	/* Phys - Updateable */
 
-#ifndef _LOCORE
-extern pt_entry_t	pte_cache_mode;
-
-#define PT_CACHEABLE	(pte_cache_mode)
-#endif
-
 /* Page R/M attributes (in pmseg.attrs). */
 #define PT_M		0x01	/* Virt - Modified */
 #define PT_H		0x02	/* Virt - Handled (Used) */
@@ -103,18 +97,6 @@ extern pt_entry_t	pte_cache_mode;
 #define L2_SPAGE	0x02	/* L2 small page (4KB) */
 #define L2_MASK		0x03	/* Mask for L2 entry type */
 #define L2_INVAL	0x00	/* L2 invalid type */
-
-/* PTE construction macros */
-#define	L2_LPTE(p, a, f)	((p) | PT_AP(a) | L2_LPAGE | (f))
-#define L2_SPTE(p, a, f)	((p) | PT_AP(a) | L2_SPAGE | (f))
-#define L2_PTE(p, a)		L2_SPTE((p), (a), PT_CACHEABLE)
-#define L2_PTE_NC(p, a)		L2_SPTE((p), (a), PT_B)
-#define L2_PTE_NC_NB(p, a)	L2_SPTE((p), (a), 0)
-#define L1_SECPTE(p, a, f)	((p) | ((a) << AP_SECTION_SHIFT) | (f) \
-				| L1_SECTION | PT_U)
-
-#define L1_PTE(p)	((p) | 0x00 | L1_PAGE | PT_U)
-#define L1_SEC(p, c)	L1_SECPTE((p), AP_KRW, (c))
 
 #define L1_SEC_SIZE	(1 << PDSHIFT)
 #define L2_LPAGE_SIZE	(NBPG * 16)
Index: arm/include/arm32/vmparam.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/vmparam.h,v
retrieving revision 1.3
diff -u -p -r1.3 vmparam.h
--- arm/include/arm32/vmparam.h	2001/11/23 18:16:10	1.3
+++ arm/include/arm32/vmparam.h	2002/02/02 00:16:11
@@ -1,7 +1,7 @@
 /*	$NetBSD: vmparam.h,v 1.3 2001/11/23 18:16:10 thorpej Exp $	*/
 
 /*
- * Copyright (c) 2001 Wasabi Systems, Inc.
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
@@ -44,8 +44,8 @@
  * Virtual Memory parameters common to all arm32 platforms.
  */
 
-/* for pt_entry_t definition */
-#include <arm/arm32/pte.h>
+#include <sys/lock.h>		/* struct simplelock */
+#include <arm/arm32/pte.h>	/* pt_entry_t */
 
 #define	USRTEXT		VM_MIN_ADDRESS
 #define	USRSTACK	VM_MAXUSER_ADDRESS
@@ -106,16 +106,21 @@
 #define	VM_MAX_KERNEL_ADDRESS	((vaddr_t) 0xffffffff)
 
 /*
- * define structure pmap_physseg: there is one of these structures
- * for each chunk of noncontig RAM you have.
+ * pmap-specific data stored in the vm_page structure.
  */
-
-#define	__HAVE_PMAP_PHYSSEG
-
-struct pmap_physseg {
-	struct pv_head *pvhead;		/* pv_entry array */
-	char *attrs;			/* attrs array */
+#define	__HAVE_VM_PAGE_MD
+struct vm_page_md {
+	struct pv_entry *pvh_list;		/* pv_entry list */
+	struct simplelock pvh_slock;		/* lock on this head */
+	int pvh_attrs;				/* page attributes */
 };
+
+#define	VM_MDPAGE_INIT(pg)						\
+do {									\
+	(pg)->mdpage.pvh_list = NULL;					\
+	simple_lock_init(&(pg)->mdpage.pvh_slock);			\
+	(pg)->mdpage.pvh_attrs = 0;					\
+} while (/*CONSTCOND*/0)
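+
+/*
+ * UVM invokes VM_MDPAGE_INIT() on each vm_page when the page arrays
+ * are set up, so this per-page data replaces the pmap_physseg
+ * pv_head/attrs arrays that were previously allocated at boot time.
+ */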
 
 #endif /* _KERNEL */
 
Index: arm/mainbus/mainbus_io.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/mainbus/mainbus_io.c,v
retrieving revision 1.6
diff -u -p -r1.6 mainbus_io.c
--- arm/mainbus/mainbus_io.c	2001/11/23 17:23:42	1.6
+++ arm/mainbus/mainbus_io.c	2002/02/02 00:16:21
@@ -163,11 +163,13 @@ mainbus_bs_map(t, bpa, size, cacheable, 
 
 	for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-		pte = pmap_pte(pmap_kernel(), va);
-		if (cacheable)
-			*pte |= PT_CACHEABLE;
-		else
-			*pte &= ~PT_CACHEABLE;
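+		/*
+		 * pmap_kenter_pa() now creates cacheable mappings by
+		 * default, so only the un-cached case needs fixing up.
+		 */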
+		if (cacheable == 0) {
+			pte = pmap_pte(pmap_kernel(), va);
+			*pte = (*pte & PG_FRAME) |
+			    pmap_pte_proto(pmap_kernel(),
+					   VM_PROT_READ|VM_PROT_WRITE,
+					   PTE_PROTO_NOCACHE);
+		}
 	}
 	pmap_update(pmap_kernel());
 
Index: arm/sa11x0/sa11x0_io.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/sa11x0/sa11x0_io.c,v
retrieving revision 1.5
diff -u -p -r1.5 sa11x0_io.c
--- arm/sa11x0/sa11x0_io.c	2001/11/23 17:23:42	1.5
+++ arm/sa11x0/sa11x0_io.c	2002/02/02 00:16:24
@@ -166,11 +166,13 @@ sa11x0_bs_map(t, bpa, size, cacheable, b
 
 	for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-		pte = pmap_pte(pmap_kernel(), va);
-		if (cacheable)
-			*pte |= PT_CACHEABLE;
-		else
-			*pte &= ~PT_CACHEABLE;
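+		/*
+		 * pmap_kenter_pa() now creates cacheable mappings by
+		 * default, so only the un-cached case needs fixing up.
+		 */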
+		if (cacheable == 0) {
+			pte = pmap_pte(pmap_kernel(), va);
+			*pte = (*pte & PG_FRAME) |
+			    pmap_pte_proto(pmap_kernel(),
+					   VM_PROT_READ|VM_PROT_WRITE,
+					   PTE_PROTO_NOCACHE);
+		}
 	}
 	pmap_update(pmap_kernel());
 
Index: arm32/ofw/ofw.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm32/ofw/ofw.c,v
retrieving revision 1.30
diff -u -p -r1.30 ofw.c
--- arm32/ofw/ofw.c	2002/01/25 19:19:26	1.30
+++ arm32/ofw/ofw.c	2002/02/02 00:16:47
@@ -93,12 +93,6 @@ extern int ofw_handleticks;
 /*
  *  Imported routines
  */
-extern void map_section	    __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa,
-				 int cacheable));
-extern void map_pagetable   __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-extern void map_entry	    __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-extern void map_entry_nc    __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-extern void map_entry_ro    __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
 extern void dump_spl_masks  __P((void));
 extern void dumpsys	    __P((void));
 extern void dotickgrovelling __P((vm_offset_t));
@@ -1329,10 +1323,10 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 			}
 
 			/* Make the entry. */
-			if ((tp->mode & 0xC) == 0xC)
-				map_entry(L2pagetable, va, pa);
-			else
-				map_entry_nc(L2pagetable, va, pa);
+			pmap_map_entry(L2pagetable, va, pa,
+			    VM_PROT_READ|VM_PROT_WRITE,
+			    (tp->mode & 0xC) == 0xC ? PTE_PROTO_CACHE
+						    : PTE_PROTO_NOCACHE);
 		}
 	}
 
@@ -1364,12 +1358,14 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 	 * cached ...
 	 * Really these should be uncached when allocated.
 	 */
-	map_entry_nc(proc0_pt_kernel.pv_va, proc0_pt_pte.pv_va,
-	    proc0_pt_pte.pv_pa);
+	pmap_map_entry(proc0_pt_kernel.pv_va, proc0_pt_pte.pv_va,
+	    proc0_pt_pte.pv_pa, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
 	for (i = 0; i < (PD_SIZE / NBPG); ++i)
-		map_entry_nc(proc0_pt_kernel.pv_va,
+		pmap_map_entry(proc0_pt_kernel.pv_va,
 		    proc0_pagedir.pv_va + NBPG * i,
-		    proc0_pagedir.pv_pa + NBPG * i);
+		    proc0_pagedir.pv_pa + NBPG * i,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Construct the proc0 L2 pagetables that map page tables.
@@ -1377,37 +1373,41 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 
 	/* Map entries in the L2pagetable used to map L2PTs. */
 	L2pagetable = proc0_pt_pte.pv_va;
-	map_entry_nc(L2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    proc0_pt_sys.pv_pa);
-	map_entry_nc(L2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    proc0_pt_kernel.pv_pa);
-	map_entry_nc(L2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    proc0_pt_pte.pv_pa);
+	pmap_map_entry(L2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    proc0_pt_sys.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(L2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    proc0_pt_kernel.pv_pa, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
+	pmap_map_entry(L2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    proc0_pt_pte.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (i = 0; i < KERNEL_VMDATA_PTS; i++)
-		map_entry_nc(L2pagetable, ((KERNEL_VM_BASE + i * 0x00400000)
-		    >> (PGSHIFT-2)), proc0_pt_vmdata[i].pv_pa);
+		pmap_map_entry(L2pagetable, ((KERNEL_VM_BASE + i * 0x00400000)
+		    >> (PGSHIFT-2)), proc0_pt_vmdata[i].pv_pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (i = 0; i < KERNEL_OFW_PTS; i++)
-		map_entry_nc(L2pagetable, ((OFW_VIRT_BASE + i * 0x00400000)
-		    >> (PGSHIFT-2)), proc0_pt_ofw[i].pv_pa);
+		pmap_map_entry(L2pagetable, ((OFW_VIRT_BASE + i * 0x00400000)
+		    >> (PGSHIFT-2)), proc0_pt_ofw[i].pv_pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (i = 0; i < KERNEL_IO_PTS; i++)
-		map_entry_nc(L2pagetable, ((IO_VIRT_BASE + i * 0x00400000)
-		    >> (PGSHIFT-2)), proc0_pt_io[i].pv_pa);
+		pmap_map_entry(L2pagetable, ((IO_VIRT_BASE + i * 0x00400000)
+		    >> (PGSHIFT-2)), proc0_pt_io[i].pv_pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Construct the proc0 L1 pagetable. */
 	L1pagetable = proc0_pagedir.pv_va;
 
-	map_pagetable(L1pagetable, 0x0, proc0_pt_sys.pv_pa);
-	map_pagetable(L1pagetable, KERNEL_BASE, proc0_pt_kernel.pv_pa);
-	map_pagetable(L1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(L1pagetable, 0x0, proc0_pt_sys.pv_pa);
+	pmap_map_l2pt(L1pagetable, KERNEL_BASE, proc0_pt_kernel.pv_pa);
+	pmap_map_l2pt(L1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    proc0_pt_pte.pv_pa);
 	for (i = 0; i < KERNEL_VMDATA_PTS; i++)
-		map_pagetable(L1pagetable, KERNEL_VM_BASE + i * 0x00400000,
+		pmap_map_l2pt(L1pagetable, KERNEL_VM_BASE + i * 0x00400000,
 		    proc0_pt_vmdata[i].pv_pa);
 	for (i = 0; i < KERNEL_OFW_PTS; i++)
-		map_pagetable(L1pagetable, OFW_VIRT_BASE + i * 0x00400000,
+		pmap_map_l2pt(L1pagetable, OFW_VIRT_BASE + i * 0x00400000,
 		    proc0_pt_ofw[i].pv_pa);
 	for (i = 0; i < KERNEL_IO_PTS; i++)
-		map_pagetable(L1pagetable, IO_VIRT_BASE + i * 0x00400000,
+		pmap_map_l2pt(L1pagetable, IO_VIRT_BASE + i * 0x00400000,
 		    proc0_pt_io[i].pv_pa);
 
 	/* 
@@ -1422,16 +1422,10 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 			int nsections = tp->size / NBPD;
 
 			while (nsections--) {
-				map_section(L1pagetable, va, pa, 0);
-
-				/*
-				 * even grosser hack:
-				 * blast B & C bits if necessary
-				 */
-				if ((tp->mode & 0xC) == 0xC)
-					((u_int *)L1pagetable)[(va >> PDSHIFT)]
-					    |= PT_B | PT_C;
-
+				pmap_map_section(L1pagetable, va, pa,
+				    VM_PROT_READ|VM_PROT_WRITE,
+				    (tp->mode & 0xC) == 0xC ? PTE_PROTO_CACHE :
+				    PTE_PROTO_NOCACHE);
 				va += NBPD;
 				pa += NBPD;
 			} /* END while */
Index: cats/cats/cats_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/cats/cats/cats_machdep.c,v
retrieving revision 1.12
diff -u -p -r1.12 cats_machdep.c
--- cats/cats/cats_machdep.c	2002/01/25 19:19:28	1.12
+++ cats/cats/cats_machdep.c	2002/02/02 00:16:57
@@ -274,30 +274,38 @@ struct l1_sec_map {
 	vm_offset_t	va;
 	vm_offset_t	pa;
 	vm_size_t	size;
-	int		flags;
+	int		prot;
+	int		cache;
 } l1_sec_table[] = {
 	/* Map 1MB for CSR space */
 	{ DC21285_ARMCSR_VBASE,			DC21285_ARMCSR_BASE,
-	    DC21285_ARMCSR_VSIZE,		0 },
+	    DC21285_ARMCSR_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1MB for fast cache cleaning space */
 	{ DC21285_CACHE_FLUSH_VBASE,		DC21285_SA_CACHE_FLUSH_BASE,
-	    DC21285_CACHE_FLUSH_VSIZE,		1 },
+	    DC21285_CACHE_FLUSH_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE },
 	/* Map 1MB for PCI IO space */
 	{ DC21285_PCI_IO_VBASE,			DC21285_PCI_IO_BASE,
-	    DC21285_PCI_IO_VSIZE,		0 },
+	    DC21285_PCI_IO_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1MB for PCI IACK space */
 	{ DC21285_PCI_IACK_VBASE,		DC21285_PCI_IACK_SPECIAL,
-	    DC21285_PCI_IACK_VSIZE,		0 },
+	    DC21285_PCI_IACK_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 16MB of type 1 PCI config access */
 	{ DC21285_PCI_TYPE_1_CONFIG_VBASE,	DC21285_PCI_TYPE_1_CONFIG,
-	    DC21285_PCI_TYPE_1_CONFIG_VSIZE,	0 },
+	    DC21285_PCI_TYPE_1_CONFIG_VSIZE,	VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 16MB of type 0 PCI config access */
 	{ DC21285_PCI_TYPE_0_CONFIG_VBASE,	DC21285_PCI_TYPE_0_CONFIG,
-	    DC21285_PCI_TYPE_0_CONFIG_VSIZE,	0 },
+	    DC21285_PCI_TYPE_0_CONFIG_VSIZE,	VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1MB of 32 bit PCI address space for ISA MEM accesses via PCI */
 	{ DC21285_PCI_ISA_MEM_VBASE,		DC21285_PCI_MEM_BASE,
-	    DC21285_PCI_ISA_MEM_VSIZE,		0 },
-	{ 0, 0, 0, 0 }
+	    DC21285_PCI_ISA_MEM_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
+	{ 0, 0, 0, 0, 0 }
 };
 
 /*
@@ -520,17 +528,17 @@ initarm(bootargs)
 	l1pagetable = kernel_l1pt.pv_pa;
 
 	/* Map the L2 pages tables in the L1 page table */
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_pagetable(l1pagetable, KERNEL_BASE,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE,
 	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_pagetable(l1pagetable, KERNEL_BASE + 0x00400000,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE + 0x00400000,
 	    kernel_pt_table[KERNEL_PT_KERNEL2]);
 
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+		pmap_map_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
-	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    kernel_ptpt.pv_pa);
 
 #ifdef VERBOSE_INIT_ARM
@@ -545,23 +553,26 @@ initarm(bootargs)
 	else {
 		extern int end;
 
-		logical = map_chunk(l1pagetable, l2pagetable,
+		logical = pmap_map_chunk(l1pagetable, l2pagetable,
 			KERNEL_TEXT_BASE,
 			physical_start, kernexec->a_text,
-			AP_KR, PT_CACHEABLE);
-		logical += map_chunk(l1pagetable, l2pagetable,
+			VM_PROT_READ, PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 			KERNEL_TEXT_BASE + logical,
 			physical_start + logical, kernexec->a_data,
-			AP_KRW, PT_CACHEABLE);
-		logical += map_chunk(l1pagetable, l2pagetable,
+			VM_PROT_READ|VM_PROT_WRITE,
+			PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 			KERNEL_TEXT_BASE + logical,
 			physical_start + logical, kernexec->a_bss,
-			AP_KRW, PT_CACHEABLE);
-		logical += map_chunk(l1pagetable, l2pagetable,
+			VM_PROT_READ|VM_PROT_WRITE,
+			PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 			KERNEL_TEXT_BASE + logical,
 			physical_start + logical, kernexec->a_syms + sizeof(int)
 			+ *(u_int *)((int)&end + kernexec->a_syms + sizeof(int)),
-			AP_KRW, PT_CACHEABLE);
+			VM_PROT_READ|VM_PROT_WRITE,
+			PTE_PROTO_CACHE);
 	}
 
 	/*
@@ -580,22 +591,28 @@ initarm(bootargs)
 #endif
 
 	/* Map the boot arguments page */
-	map_entry_ro(l2pagetable, ebsabootinfo.bt_vargp, ebsabootinfo.bt_pargp);
+	pmap_map_entry(l2pagetable, ebsabootinfo.bt_vargp,
+	    ebsabootinfo.bt_pargp, VM_PROT_READ, PTE_PROTO_CACHE);
 
 	/* Map the stack pages */
-	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
-	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
-	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
-	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
-	    PD_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the page table that maps the kernel pages */
-	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
+	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -603,26 +620,32 @@ initarm(bootargs)
 	 */
 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
 	l2pagetable = kernel_ptpt.pv_pa;
-	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_entry_nc(l2pagetable, ((KERNEL_BASE +0x00400000) >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL2]);
-
-	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa);
-	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_SYS]);
+	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, ((KERNEL_BASE +0x00400000) >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL2],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+
+	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_SYS],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
+		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
-		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map the system page in the kernel page table for the bottom 1Meg
 	 * of the virtual memory map.
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
-	map_entry(l2pagetable, 0x00000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/* Map the core memory needed before autoconfig */
 	loop = 0;
@@ -635,9 +658,11 @@ initarm(bootargs)
 		    l1_sec_table[loop].va);
 #endif
 		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_SEC_SIZE)
-			map_section(l1pagetable, l1_sec_table[loop].va + sz,
+			pmap_map_section(l1pagetable, 
+			    l1_sec_table[loop].va + sz,
 			    l1_sec_table[loop].pa + sz,
-			    l1_sec_table[loop].flags);
+			    l1_sec_table[loop].prot,
+			    l1_sec_table[loop].cache);
 		++loop;
 	}
 
Index: dnard/ofw/ofw.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/dnard/ofw/ofw.c,v
retrieving revision 1.5
diff -u -p -r1.5 ofw.c
--- dnard/ofw/ofw.c	2002/01/25 19:19:28	1.5
+++ dnard/ofw/ofw.c	2002/02/02 00:17:17
@@ -93,12 +93,6 @@ extern int ofw_handleticks;
 /*
  *  Imported routines
  */
-extern void map_section	    __P((vaddr_t pt, vaddr_t va, vaddr_t pa,
-				 int cacheable));
-extern void map_pagetable   __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
-extern void map_entry	    __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
-extern void map_entry_nc    __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
-extern void map_entry_ro    __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
 extern void dump_spl_masks  __P((void));
 extern void dumpsys	    __P((void));
 extern void dotickgrovelling __P((vaddr_t));
@@ -1330,10 +1324,10 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 			}
 
 			/* Make the entry. */
-			if ((tp->mode & 0xC) == 0xC)
-				map_entry(L2pagetable, va, pa);
-			else
-				map_entry_nc(L2pagetable, va, pa);
+			pmap_map_entry(L2pagetable, va, pa,
+			    VM_PROT_READ|VM_PROT_WRITE,
+			    (tp->mode & 0xC) == 0xC ? PTE_PROTO_CACHE
+						    : PTE_PROTO_NOCACHE);
 		}
 	}
 
@@ -1365,12 +1359,14 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 	 * cached ...
 	 * Really these should be uncached when allocated.
 	 */
-	map_entry_nc(proc0_pt_kernel.pv_va, proc0_pt_pte.pv_va,
-	    proc0_pt_pte.pv_pa);
+	pmap_map_entry(proc0_pt_kernel.pv_va, proc0_pt_pte.pv_va,
+	    proc0_pt_pte.pv_pa, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
 	for (i = 0; i < (PD_SIZE / NBPG); ++i)
-		map_entry_nc(proc0_pt_kernel.pv_va,
+		pmap_map_entry(proc0_pt_kernel.pv_va,
 		    proc0_pagedir.pv_va + NBPG * i,
-		    proc0_pagedir.pv_pa + NBPG * i);
+		    proc0_pagedir.pv_pa + NBPG * i,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Construct the proc0 L2 pagetables that map page tables.
@@ -1378,37 +1374,43 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 
 	/* Map entries in the L2pagetable used to map L2PTs. */
 	L2pagetable = proc0_pt_pte.pv_va;
-	map_entry_nc(L2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    proc0_pt_sys.pv_pa);
-	map_entry_nc(L2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    proc0_pt_kernel.pv_pa);
-	map_entry_nc(L2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    proc0_pt_pte.pv_pa);
+	pmap_map_entry(L2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    proc0_pt_sys.pv_pa, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
+	pmap_map_entry(L2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    proc0_pt_kernel.pv_pa, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
+	pmap_map_entry(L2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    proc0_pt_pte.pv_pa, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
 	for (i = 0; i < KERNEL_VMDATA_PTS; i++)
-		map_entry_nc(L2pagetable, ((KERNEL_VM_BASE + i * 0x00400000)
-		    >> (PGSHIFT-2)), proc0_pt_vmdata[i].pv_pa);
+		pmap_map_entry(L2pagetable, ((KERNEL_VM_BASE + i * 0x00400000)
+		    >> (PGSHIFT-2)), proc0_pt_vmdata[i].pv_pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (i = 0; i < KERNEL_OFW_PTS; i++)
-		map_entry_nc(L2pagetable, ((OFW_VIRT_BASE + i * 0x00400000)
-		    >> (PGSHIFT-2)), proc0_pt_ofw[i].pv_pa);
+		pmap_map_entry(L2pagetable, ((OFW_VIRT_BASE + i * 0x00400000)
+		    >> (PGSHIFT-2)), proc0_pt_ofw[i].pv_pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (i = 0; i < KERNEL_IO_PTS; i++)
-		map_entry_nc(L2pagetable, ((IO_VIRT_BASE + i * 0x00400000)
-		    >> (PGSHIFT-2)), proc0_pt_io[i].pv_pa);
+		pmap_map_entry(L2pagetable, ((IO_VIRT_BASE + i * 0x00400000)
+		    >> (PGSHIFT-2)), proc0_pt_io[i].pv_pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Construct the proc0 L1 pagetable. */
 	L1pagetable = proc0_pagedir.pv_va;
 
-	map_pagetable(L1pagetable, 0x0, proc0_pt_sys.pv_pa);
-	map_pagetable(L1pagetable, KERNEL_BASE, proc0_pt_kernel.pv_pa);
-	map_pagetable(L1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(L1pagetable, 0x0, proc0_pt_sys.pv_pa);
+	pmap_map_l2pt(L1pagetable, KERNEL_BASE, proc0_pt_kernel.pv_pa);
+	pmap_map_l2pt(L1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    proc0_pt_pte.pv_pa);
 	for (i = 0; i < KERNEL_VMDATA_PTS; i++)
-		map_pagetable(L1pagetable, KERNEL_VM_BASE + i * 0x00400000,
+		pmap_map_l2pt(L1pagetable, KERNEL_VM_BASE + i * 0x00400000,
 		    proc0_pt_vmdata[i].pv_pa);
 	for (i = 0; i < KERNEL_OFW_PTS; i++)
-		map_pagetable(L1pagetable, OFW_VIRT_BASE + i * 0x00400000,
+		pmap_map_l2pt(L1pagetable, OFW_VIRT_BASE + i * 0x00400000,
 		    proc0_pt_ofw[i].pv_pa);
 	for (i = 0; i < KERNEL_IO_PTS; i++)
-		map_pagetable(L1pagetable, IO_VIRT_BASE + i * 0x00400000,
+		pmap_map_l2pt(L1pagetable, IO_VIRT_BASE + i * 0x00400000,
 		    proc0_pt_io[i].pv_pa);
 
 	/* 
@@ -1423,15 +1425,10 @@ ofw_construct_proc0_addrspace(proc0_ttbb
 			int nsections = tp->size / NBPD;
 
 			while (nsections--) {
-				map_section(L1pagetable, va, pa, 0);
-
-				/*
-				 * even grosser hack:
-				 * blast B & C bits if necessary
-				 */
-				if ((tp->mode & 0xC) == 0xC)
-					((u_int *)L1pagetable)[(va >> PDSHIFT)]
-					    |= PT_B | PT_C;
+				pmap_map_section(L1pagetable, va, pa,
+				    VM_PROT_READ|VM_PROT_WRITE,
+				    (tp->mode & 0xC) == 0xC ? PTE_PROTO_CACHE :
+				    PTE_PROTO_NOCACHE);
 
 				va += NBPD;
 				pa += NBPD;
Index: evbarm/integrator/int_bus_dma.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/evbarm/integrator/int_bus_dma.c,v
retrieving revision 1.5
diff -u -p -r1.5 int_bus_dma.c
--- evbarm/integrator/int_bus_dma.c	2002/01/25 20:57:43	1.5
+++ evbarm/integrator/int_bus_dma.c	2002/02/02 00:17:27
@@ -350,7 +350,10 @@ integrator_bus_dmamem_map(t, segs, nsegs
 				cpu_dcache_wbinv_range(va, NBPG);
 				cpu_drain_writebuf();
 				ptep = vtopte(va);
-				*ptep = ((*ptep) & (~PT_C | PT_B));
+				*ptep = (*ptep & PG_FRAME) |
+				    pmap_pte_proto(pmap_kernel(),
+						   VM_PROT_READ|VM_PROT_WRITE,
+						   PTE_PROTO_NOCACHE);
 				tlb_flush();
 			}
 #ifdef DEBUG_DMA
Index: evbarm/integrator/integrator_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/evbarm/integrator/integrator_machdep.c,v
retrieving revision 1.8
diff -u -p -r1.8 integrator_machdep.c
--- evbarm/integrator/integrator_machdep.c	2002/01/30 04:00:47	1.8
+++ evbarm/integrator/integrator_machdep.c	2002/02/02 00:17:37
@@ -165,16 +165,6 @@ struct user *proc0paddr;
 
 void consinit		__P((void));
 
-void map_section	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa,
-			     int cacheable));
-void map_pagetable	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry		__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry_nc	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry_ro	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-vm_size_t map_chunk	__P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
-			     vm_offset_t pa, vm_size_t size, u_int acc,
-			     u_int flg));
-
 void process_kernel_args	__P((char *));
 void data_abort_handler		__P((trapframe_t *frame));
 void prefetch_abort_handler	__P((trapframe_t *frame));
@@ -327,18 +317,23 @@ struct l1_sec_map {
 	vm_offset_t	va;
 	vm_offset_t	pa;
 	vm_size_t	size;
-	int		flags;
+	int		prot;
+	int		cache;
 } l1_sec_table[] = {
 #if NPLCOM > 0 && defined(PLCONSOLE)
-	{ UART0_BOOT_BASE, IFPGA_IO_BASE + IFPGA_UART0, 1024 * 1024, 0},
-	{ UART1_BOOT_BASE, IFPGA_IO_BASE + IFPGA_UART1, 1024 * 1024, 0},
+	{ UART0_BOOT_BASE, IFPGA_IO_BASE + IFPGA_UART0, 1024 * 1024,
+	  VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE },
+	{ UART1_BOOT_BASE, IFPGA_IO_BASE + IFPGA_UART1, 1024 * 1024,
+	  VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE },
 #endif
 #if NPCI > 0
-	{ IFPGA_PCI_IO_VBASE, IFPGA_PCI_IO_BASE, IFPGA_PCI_IO_VSIZE, 0},
-	{ IFPGA_PCI_CONF_VBASE, IFPGA_PCI_CONF_BASE, IFPGA_PCI_CONF_VSIZE, 0},
+	{ IFPGA_PCI_IO_VBASE, IFPGA_PCI_IO_BASE, IFPGA_PCI_IO_VSIZE,
+	  VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE },
+	{ IFPGA_PCI_CONF_VBASE, IFPGA_PCI_CONF_BASE, IFPGA_PCI_CONF_VSIZE,
+	  VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE },
 #endif
 
-	{ 0, 0, 0, 0 }
+	{ 0, 0, 0, 0, 0 }
 };
 
 /*
@@ -617,14 +612,14 @@ initarm(bootinfo)
 	l1pagetable = kernel_l1pt.pv_pa;
 
 	/* Map the L2 page tables in the L1 page table */
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_pagetable(l1pagetable, KERNEL_BASE,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE,
 	    kernel_pt_table[KERNEL_PT_KERNEL]);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+		pmap_map_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
-	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    kernel_ptpt.pv_pa);
 
 #ifdef VERBOSE_INIT_ARM
@@ -643,20 +638,24 @@ initarm(bootinfo)
 		 */
 		textsize = textsize & ~PGOFSET;
 		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
-		/* logical  = map_chunk(l1pagetable, l2pagetable, KERNEL_BASE,
+#if 0
+		logical = pmap_map_chunk(l1pagetable, l2pagetable, KERNEL_BASE,
 		    physical_start, KERNEL_TEXT_BASE - KERNEL_BASE,
-		    AP_KRW, PT_CACHEABLE); */
-		logical = map_chunk(l1pagetable, l2pagetable,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+#endif
+		logical = pmap_map_chunk(l1pagetable, l2pagetable,
 		    KERNEL_TEXT_BASE, physical_start, textsize,
-		    AP_KRW, PT_CACHEABLE);
-		logical += map_chunk(l1pagetable, l2pagetable,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 		    KERNEL_TEXT_BASE + logical, physical_start + logical,
-		    totalsize - textsize, AP_KRW, PT_CACHEABLE);
+		    totalsize - textsize, VM_PROT_READ|VM_PROT_WRITE,
+		    PTE_PROTO_CACHE);
 #if 0
-		logical += map_chunk(0, l2pagetable, KERNEL_BASE + logical,
+		logical += pmap_map_chunk(0, l2pagetable,
+		    KERNEL_BASE + logical,
 		    physical_start + logical, kernexec->a_syms + sizeof(int)
 		    + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #endif
 	}
 
@@ -666,23 +665,29 @@ initarm(bootinfo)
 
 	/* Map the boot arguments page */
 #if 0
-	map_entry_ro(l2pagetable, intbootinfo.bt_vargp, intbootinfo.bt_pargp);
+	pmap_map_entry(l2pagetable, intbootinfo.bt_vargp, intbootinfo.bt_pargp,
+	    VM_PROT_READ, PTE_PROTO_CACHE);
 #endif
 
 	/* Map the stack pages */
-	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
-	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
-	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
-	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
-	    PD_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the page table that maps the kernel pages */
-	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
+	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -690,16 +695,19 @@ initarm(bootinfo)
 	 */
 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
 	l2pagetable = kernel_ptpt.pv_pa;
-	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa);
-	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_SYS]);
+	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_SYS], VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
+		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
-		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map the system page in the kernel page table for the bottom 1Meg
@@ -709,9 +717,11 @@ initarm(bootinfo)
 #if 1
 	/* MULTI-ICE requires that page 0 is NC/NB so that it can download
 	   the cache-clean code there.  */
-	map_entry_nc(l2pagetable, 0x00000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 #else
-	map_entry_nc(l2pagetable, 0x00000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 #endif
 	/* Map the core memory needed before autoconfig */
 	loop = 0;
@@ -724,9 +734,11 @@ initarm(bootinfo)
 		    l1_sec_table[loop].va);
 #endif
 		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_SEC_SIZE)
-			map_section(l1pagetable, l1_sec_table[loop].va + sz,
+			pmap_map_section(l1pagetable,
+			    l1_sec_table[loop].va + sz,
 			    l1_sec_table[loop].pa + sz,
-			    l1_sec_table[loop].flags);
+			    l1_sec_table[loop].prot,
+			    l1_sec_table[loop].cache);
 		++loop;
 	}
 
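While here: the (PGSHIFT-2) shifts in the PTPT entries above just
compute the byte offset of a page's PTE within the PTE space in a
single step.  With 4-byte PTEs,

	offset = (va >> PGSHIFT) * sizeof(pt_entry_t)
	       = (va >> PGSHIFT) << 2
	       = va >> (PGSHIFT - 2);

which is why the in-line comment calls the literal 2 "slightly
bogus": it hard-codes log2(sizeof(pt_entry_t)).
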
Index: evbarm/iq80310/iq80310_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/evbarm/iq80310/iq80310_machdep.c,v
retrieving revision 1.18
diff -u -p -r1.18 iq80310_machdep.c
--- evbarm/iq80310/iq80310_machdep.c	2002/01/30 04:01:36	1.18
+++ evbarm/iq80310/iq80310_machdep.c	2002/02/02 00:17:47
@@ -271,7 +271,8 @@ struct l1_sec_map {
 	vaddr_t	va;
 	vaddr_t	pa;
 	vsize_t	size;
-	int flags;
+	int prot;
+	int cache;
 } l1_sec_table[] = {
     /*
      * Map the on-board devices VA == PA so that we can access them
@@ -281,7 +282,8 @@ struct l1_sec_map {
 	IQ80310_OBIO_BASE,
 	IQ80310_OBIO_BASE,
 	IQ80310_OBIO_SIZE,
-	0,
+	VM_PROT_READ|VM_PROT_WRITE,
+	PTE_PROTO_NOCACHE,
     },
 
     {
@@ -576,16 +578,16 @@ initarm(void *arg)
 	l1pagetable = kernel_l1pt.pv_pa;
 
 	/* Map the L2 page tables in the L1 page table */
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_pagetable(l1pagetable, KERNEL_BASE,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE,
 	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_pagetable(l1pagetable, IQ80310_IOPXS_VBASE,
+	pmap_map_l2pt(l1pagetable, IQ80310_IOPXS_VBASE,
 	    kernel_pt_table[KERNEL_PT_IOPXS]);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+		pmap_map_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
-	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    kernel_ptpt.pv_pa);
 
 #ifdef VERBOSE_INIT_ARM
@@ -609,21 +611,21 @@ initarm(void *arg)
 		/*
 		 * This maps the kernel text/data/bss VA==PA.
 		 */
-		logical += map_chunk(l1pagetable, l2pagetable,
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 		    KERNEL_BASE + logical,
 		    physical_start + logical, textsize,
-		    AP_KRW, PT_CACHEABLE);
-		logical += map_chunk(l1pagetable, l2pagetable,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 		    KERNEL_BASE + logical,
 		    physical_start + logical, totalsize - textsize,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 #if 0 /* XXX No symbols yet. */
-		logical += map_chunk(l1pagetable, l2pagetable,
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 		    KERNEL_BASE + logical,
 		    physical_start + logical, kernexec->a_syms + sizeof(int)
 		    + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #endif
 	}
 
@@ -632,23 +634,29 @@ initarm(void *arg)
 #endif
 
 	/* Map the stack pages */
-	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
-	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
-	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
-	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
-	    PD_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
 
 	/* Map the Mini-Data cache clean area. */
-	map_chunk(0, l2pagetable, minidataclean.pv_va, minidataclean.pv_pa,
-	    NBPG, AP_KRW, PT_CACHEABLE);
+	pmap_map_chunk(0, l2pagetable, minidataclean.pv_va, minidataclean.pv_pa,
+	    NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/* Map the page table that maps the kernel pages */
-	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
+	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -656,23 +664,28 @@ initarm(void *arg)
 	 */
 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
 	l2pagetable = kernel_ptpt.pv_pa;
-	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa);
-	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_SYS]);
+	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL], VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_SYS], VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
+		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
-		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map the system page in the kernel page table for the bottom 1Meg
 	 * of the virtual memory map.
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
-	map_entry(l2pagetable, 0x00000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/*
 	 * Map devices we can map w/ section mappings.
@@ -687,9 +700,11 @@ initarm(void *arg)
 		    l1_sec_table[loop].va);
 #endif
 		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_SEC_SIZE)
-			map_section(l1pagetable, l1_sec_table[loop].va + sz,
+			pmap_map_section(l1pagetable,
+			    l1_sec_table[loop].va + sz,
 			    l1_sec_table[loop].pa + sz,
-			    l1_sec_table[loop].flags);
+			    l1_sec_table[loop].prot,
+			    l1_sec_table[loop].cache);
 		++loop;
 	}
 
@@ -704,8 +719,9 @@ initarm(void *arg)
 	    I80312_PCI_XLATE_PIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
 	    IQ80310_PIOW_VBASE);
 #endif
-	map_chunk(0, l2pagetable, IQ80310_PIOW_VBASE,
-	    I80312_PCI_XLATE_PIOW_BASE, I80312_PCI_XLATE_IOSIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, IQ80310_PIOW_VBASE,
+	    I80312_PCI_XLATE_PIOW_BASE, I80312_PCI_XLATE_IOSIZE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 #ifdef VERBOSE_INIT_ARM
 	printf("Mapping SIOW 0x%08lx -> 0x%08lx @ 0x%08lx\n",
@@ -713,8 +729,9 @@ initarm(void *arg)
 	    I80312_PCI_XLATE_SIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
 	    IQ80310_SIOW_VBASE);
 #endif
-	map_chunk(0, l2pagetable, IQ80310_SIOW_VBASE,
-	    I80312_PCI_XLATE_SIOW_BASE, I80312_PCI_XLATE_IOSIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, IQ80310_SIOW_VBASE,
+	    I80312_PCI_XLATE_SIOW_BASE, I80312_PCI_XLATE_IOSIZE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 #ifdef VERBOSE_INIT_ARM
 	printf("Mapping 80312 0x%08lx -> 0x%08lx @ 0x%08lx\n",
@@ -722,8 +739,9 @@ initarm(void *arg)
 	    I80312_PMMR_BASE + I80312_PMMR_SIZE - 1,
 	    IQ80310_80312_VBASE);
 #endif
-	map_chunk(0, l2pagetable, IQ80310_80312_VBASE,
-	    I80312_PMMR_BASE, I80312_PMMR_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, IQ80310_80312_VBASE,
+	    I80312_PMMR_BASE, I80312_PMMR_SIZE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Give the XScale global cache clean code an appropriately
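
A pattern worth stating once for all the boards: everything the MMU's
table walker reads (the L1 table, the PTPT entries) is mapped
PTE_PROTO_NOCACHE, because on these CPUs the walker fetches
descriptors straight from memory and never looks in the data cache;
everything only the CPU core touches (stacks, text/data, the XScale
mini-data clean area) is PTE_PROTO_CACHE.  In terms of the old flag
bits the two prototypes differ only in B and C -- schematically:

	pte_cached   = pa | ap_bits | PT_B | PT_C;
	pte_uncached = pa | ap_bits;
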
Index: hpcarm/hpcarm/hpc_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/hpcarm/hpcarm/hpc_machdep.c,v
retrieving revision 1.26
diff -u -p -r1.26 hpc_machdep.c
--- hpcarm/hpcarm/hpc_machdep.c	2002/01/30 00:40:20	1.26
+++ hpcarm/hpcarm/hpc_machdep.c	2002/02/02 00:17:56
@@ -177,16 +177,6 @@ extern vaddr_t sa11x0_idle_mem;
 void physcon_display_base	__P((u_int addr));
 void consinit		__P((void));
 
-void map_section	__P((vaddr_t pt, vaddr_t va, vaddr_t pa,
-			     int cacheable));
-void map_pagetable	__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
-void map_entry		__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
-void map_entry_nc	__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
-void map_entry_ro	__P((vaddr_t pt, vaddr_t va, vaddr_t pa));
-vm_size_t map_chunk	__P((vaddr_t pd, vaddr_t pt, vaddr_t va,
-			     vaddr_t pa, vm_size_t size, u_int acc,
-			     u_int flg));
-
 void data_abort_handler		__P((trapframe_t *frame));
 void prefetch_abort_handler	__P((trapframe_t *frame));
 void undefinedinstruction_bounce	__P((trapframe_t *frame));
@@ -506,17 +496,17 @@ initarm(argc, argv, bi)
 	l1pagetable = kernel_l1pt.pv_pa;
 
 	/* Map the L2 page tables in the L1 page table */
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_pagetable(l1pagetable, KERNEL_SPACE_START,
+	pmap_map_l2pt(l1pagetable, KERNEL_SPACE_START,
 	    kernel_pt_table[KERNEL_PT_KERNEL]);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+		pmap_map_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
-	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    kernel_ptpt.pv_pa);
 #define SAIPIO_BASE		0xd0000000		/* XXX XXX */
-	map_pagetable(l1pagetable, SAIPIO_BASE,
+	pmap_map_l2pt(l1pagetable, SAIPIO_BASE,
 	    kernel_pt_table[KERNEL_PT_IO]);
 
 
@@ -533,17 +523,19 @@ initarm(argc, argv, bi)
 	 */
 #if 0
 	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
-		logical = map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
+		logical = pmap_map_chunk(l1pagetable, l2pagetable,
+		    KERNEL_TEXT_BASE,
 		    physical_start, kernexec->a_text,
-		    AP_KR, PT_CACHEABLE);
-		logical += map_chunk(l1pagetable, l2pagetable,
+		    VM_PROT_READ, PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(l1pagetable, l2pagetable,
 		    KERNEL_TEXT_BASE + logical, physical_start + logical,
-		    kerneldatasize - kernexec->a_text, AP_KRW, PT_CACHEABLE);
+		    kerneldatasize - kernexec->a_text,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 	} else
 #endif
-		map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
+		pmap_map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
 		    KERNEL_TEXT_BASE, kerneldatasize,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 #ifdef VERBOSE_INIT_ARM
 	printf("Constructing L2 page tables\n");
@@ -551,22 +543,28 @@ initarm(argc, argv, bi)
 
 	/* Map the stack pages */
 	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
-	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
-	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
-	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
-	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
-	    PD_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the page table that maps the kernel pages */
-	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
+	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map a page for entering idle mode */
-	map_entry_nc(l2pagetable, sa11x0_idle_mem, sa11x0_idle_mem);
+	pmap_map_entry(l2pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -574,39 +572,47 @@ initarm(argc, argv, bi)
 	 */
 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
 	l2pagetable = kernel_ptpt.pv_pa;
-	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_entry_nc(l2pagetable, (KERNEL_SPACE_START >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
+	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_SYS],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (KERNEL_SPACE_START >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
-		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
+		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
-		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	}
-	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa);
-	map_entry_nc(l2pagetable, (SAIPIO_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_IO]);
+	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (SAIPIO_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_IO],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map the system page in the kernel page table for the bottom 1Meg
 	 * of the virtual memory map.
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
-	map_entry(l2pagetable, 0x0000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/* Map any I/O modules here, as we don't have real bus_space_map() */
 	printf("mapping IO...");
 	l2pagetable = kernel_pt_table[KERNEL_PT_IO];
-	map_entry_nc(l2pagetable, SACOM3_BASE, SACOM3_HW_BASE);
+	pmap_map_entry(l2pagetable, SACOM3_BASE, SACOM3_HW_BASE,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 #ifdef CPU_SA110
 	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
-	map_chunk(0, l2pagetable, sa110_cache_clean_addr,
+	pmap_map_chunk(0, l2pagetable, sa110_cache_clean_addr,
 	    0xe0000000, CPU_SA110_CACHE_CLEAN_SIZE,
-	    AP_KRW, PT_CACHEABLE);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #endif
 	/*
 	 * Now we have the real page tables in place so we can switch to them.
@@ -775,7 +781,8 @@ rpc_sa110_cc_setup(void)
 	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
 	for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
 		pte = pmap_pte(pmap_kernel(), (sa110_cc_base + loop));
-		*pte = L2_PTE(kaddr, AP_KR);
+		*pte = kaddr | pte_proto(PTE_PROTO_KERNEL,
+		    VM_PROT_READ, PTE_PROTO_CACHE);
 	}
 	sa110_cache_clean_addr = sa110_cc_base;
 	sa110_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
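
(pte_proto() here is the other spelling of pmap_pte_proto(): it takes
a PTE_PROTO_KERNEL-style selector instead of a pmap.  Conceptually
both are just lookups into a table of prototype PTEs that each CPU
class fills in at attach time -- a sketch, not the actual
implementation:

	/* Indexed [kernel/user][VM_PROT_* mask][PTE_PROTO_(NO)CACHE]. */
	pt_entry_t pte_proto_table[2][8][2];

	pt_entry_t
	pte_proto(int space, vm_prot_t prot, int cache)
	{

		return (pte_proto_table[space][prot & VM_PROT_ALL][cache]);
	}

which is what lets a CPU class hand back different cache modes for
read-only and read/write prototypes.)
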
Index: hpcarm/sa11x0/sa11x0_io.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/hpcarm/sa11x0/sa11x0_io.c,v
retrieving revision 1.7
diff -u -p -r1.7 sa11x0_io.c
--- hpcarm/sa11x0/sa11x0_io.c	2001/11/23 17:23:44	1.7
+++ hpcarm/sa11x0/sa11x0_io.c	2002/02/02 00:17:59
@@ -167,10 +167,11 @@ sa11x0_bs_map(t, bpa, size, cacheable, b
 	for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
 		pte = pmap_pte(pmap_kernel(), va);
-		if (cacheable)
-			*pte |= PT_CACHEABLE;
-		else
-			*pte &= ~PT_CACHEABLE;
+		if (cacheable == 0)
+			*pte = (*pte & PG_FRAME) |
+			    pmap_pte_proto(pmap_kernel(),
+					   VM_PROT_READ|VM_PROT_WRITE,
+					   PTE_PROTO_NOCACHE);
 	}
 	pmap_update(pmap_kernel());
 
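(Note the new code only touches the PTE when cacheable == 0: the
pmap_kenter_pa() on the previous line now installs a cacheable
read/write mapping by default, so the cacheable case needs no
fix-up.  Callers are unchanged -- e.g., hypothetically:

	bus_space_handle_t ioh;

	if (sa11x0_bs_map(t, UART_HW_BASE, UART_SPACE, 0, &ioh))
		panic("cannot map uart");

with 0 meaning device registers, i.e. uncached.)
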
Index: netwinder/netwinder/netwinder_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/netwinder/netwinder/netwinder_machdep.c,v
retrieving revision 1.15
diff -u -p -r1.15 netwinder_machdep.c
--- netwinder/netwinder/netwinder_machdep.c	2002/01/25 19:19:30	1.15
+++ netwinder/netwinder/netwinder_machdep.c	2002/02/02 00:18:09
@@ -160,16 +160,6 @@ int fcomcndetach __P((void));
 
 void isa_netwinder_init __P((u_int iobase, u_int membase));
 
-void map_section	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa,
-			     int cacheable));
-void map_pagetable	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry		__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry_nc	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-void map_entry_ro	__P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
-vm_size_t map_chunk	__P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
-			     vm_offset_t pa, vm_size_t size, u_int acc,
-			     u_int flg));
-
 void process_kernel_args	__P((char *));
 void data_abort_handler		__P((trapframe_t *frame));
 void prefetch_abort_handler	__P((trapframe_t *frame));
@@ -307,30 +297,38 @@ struct l1_sec_map {
 	vm_offset_t	va;
 	vm_offset_t	pa;
 	vm_size_t	size;
-	int		flags;
+	int		prot;
+	int		cache;
 } l1_sec_table[] = {
 	/* Map 1MB for CSR space */
 	{ DC21285_ARMCSR_VBASE,			DC21285_ARMCSR_BASE,
-	    DC21285_ARMCSR_VSIZE,		0 },
+	    DC21285_ARMCSR_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1MB for fast cache cleaning space */
 	{ DC21285_CACHE_FLUSH_VBASE,		DC21285_SA_CACHE_FLUSH_BASE,
-	    DC21285_CACHE_FLUSH_VSIZE,		1 },
+	    DC21285_CACHE_FLUSH_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE },
 	/* Map 1MB for PCI IO space */
 	{ DC21285_PCI_IO_VBASE,			DC21285_PCI_IO_BASE,
-	    DC21285_PCI_IO_VSIZE,		0 },
+	    DC21285_PCI_IO_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1MB for PCI IACK space */
 	{ DC21285_PCI_IACK_VBASE,		DC21285_PCI_IACK_SPECIAL,
-	    DC21285_PCI_IACK_VSIZE,		0 },
+	    DC21285_PCI_IACK_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 16MB of type 1 PCI config access */
 	{ DC21285_PCI_TYPE_1_CONFIG_VBASE,	DC21285_PCI_TYPE_1_CONFIG,
-	    DC21285_PCI_TYPE_1_CONFIG_VSIZE,	0 },
+	    DC21285_PCI_TYPE_1_CONFIG_VSIZE,	VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 16MB of type 0 PCI config access */
 	{ DC21285_PCI_TYPE_0_CONFIG_VBASE,	DC21285_PCI_TYPE_0_CONFIG,
-	    DC21285_PCI_TYPE_0_CONFIG_VSIZE,	0 },
+	    DC21285_PCI_TYPE_0_CONFIG_VSIZE,	VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
 	/* Map 1MB of 32 bit PCI address space for ISA MEM accesses via PCI */
 	{ DC21285_PCI_ISA_MEM_VBASE,		DC21285_PCI_MEM_BASE,
-	    DC21285_PCI_ISA_MEM_VSIZE,		0 },
-	{ 0, 0, 0, 0 }
+	    DC21285_PCI_ISA_MEM_VSIZE,		VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_NOCACHE },
+	{ 0, 0, 0, 0, 0 }
 };
 
 /*
@@ -565,14 +563,14 @@ initarm(bootinfo)
 	l1pagetable = kernel_l1pt.pv_pa;
 
 	/* Map the L2 page tables in the L1 page table */
-	map_pagetable(l1pagetable, 0x00000000,
+	pmap_map_l2pt(l1pagetable, 0x00000000,
 	    kernel_pt_table[KERNEL_PT_SYS]);
-	map_pagetable(l1pagetable, KERNEL_BASE,
+	pmap_map_l2pt(l1pagetable, KERNEL_BASE,
 	    kernel_pt_table[KERNEL_PT_KERNEL]);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_pagetable(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
+		pmap_map_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
-	map_pagetable(l1pagetable, PROCESS_PAGE_TBLS_BASE,
+	pmap_map_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
 	    kernel_ptpt.pv_pa);
 
 #ifdef VERBOSE_INIT_ARM
@@ -593,25 +591,27 @@ initarm(bootinfo)
 		 */
 		textsize = textsize & ~PGOFSET;
 		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
-		logical  = map_chunk(0, l2pagetable, KERNEL_BASE,
+		logical  = pmap_map_chunk(0, l2pagetable, KERNEL_BASE,
 		    physical_start, KERNEL_TEXT_BASE - KERNEL_BASE,
-		    AP_KRW, PT_CACHEABLE);
-		logical += map_chunk(0, l2pagetable, KERNEL_BASE + logical,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(0, l2pagetable, KERNEL_BASE + logical,
 		    physical_start + logical, textsize,
-		    AP_KRW, PT_CACHEABLE);
-		logical += map_chunk(0, l2pagetable, KERNEL_BASE + logical,
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+		logical += pmap_map_chunk(0, l2pagetable, KERNEL_BASE + logical,
 		    physical_start + logical, totalsize - textsize,
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #if 0
-		logical += map_chunk(0, l2pagetable, KERNEL_BASE + logical,
+		logical += pmap_map_chunk(0, l2pagetable, KERNEL_BASE + logical,
 		    physical_start + logical, kernexec->a_syms + sizeof(int)
 		    + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
-		    AP_KRW, PT_CACHEABLE);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #endif
 	}
 #else
-	map_section(l1pagetable, 0xf0000000, 0x00000000, 1);
-	map_section(l1pagetable, 0xf0100000, 0x00100000, 1);
+	pmap_map_section(l1pagetable, 0xf0000000, 0x00000000,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
+	pmap_map_section(l1pagetable, 0xf0100000, 0x00100000,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 #endif
 #if 0
 	/*
@@ -636,19 +636,24 @@ initarm(bootinfo)
 #endif
 
 	/* Map the stack pages */
-	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
-	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
-	    ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
-	    UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-	    UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
-	map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
-	    PD_SIZE, AP_KRW, 0);
+	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE,
+	    PTE_PROTO_CACHE);
+	pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/* Map the page table that maps the kernel pages */
-	map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
+	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -656,23 +661,28 @@ initarm(bootinfo)
 	 */
 	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
 	l2pagetable = kernel_ptpt.pv_pa;
-	map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_KERNEL]);
-	map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa);
-	map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
-	    kernel_pt_table[KERNEL_PT_SYS]);
+	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_KERNEL],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
+	    kernel_ptpt.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
+	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
+	    kernel_pt_table[KERNEL_PT_SYS],
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
-		map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
+		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
-		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_NOCACHE);
 
 	/*
 	 * Map the system page in the kernel page table for the bottom 1Meg
 	 * of the virtual memory map.
 	 */
 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
-	map_entry(l2pagetable, 0x00000000, systempage.pv_pa);
+	pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PROTO_CACHE);
 
 	/* Map the core memory needed before autoconfig */
 	loop = 0;
@@ -685,9 +695,11 @@ initarm(bootinfo)
 		    l1_sec_table[loop].va);
 #endif
 		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_SEC_SIZE)
-			map_section(l1pagetable, l1_sec_table[loop].va + sz,
+			pmap_map_section(l1pagetable,
+			    l1_sec_table[loop].va + sz,
 			    l1_sec_table[loop].pa + sz,
-			    l1_sec_table[loop].flags);
+			    l1_sec_table[loop].prot,
+			    l1_sec_table[loop].cache);
 		++loop;
 	}
 

--Q0rSlbzrZN6k9QnT--