Subject: Fixing the PT bootstrap problem
To: None <port-arm@netbsd.org>
From: Jason R Thorpe <thorpej@wasabisystems.com>
List: port-arm
Date: 02/21/2002 12:11:42
--k1lZvvs/B4yU6o8G
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline

Hi folks...

As many of you know, the early page table bootstrap process in the ARM
port isn't very tidy.  Systems which can boot large kernels (e.g. cats)
have hacks in there to ensure that the correct L2 tables are used when
building up the address space.

In an effort to address the problem of booting a large (with ramdisk)
kernel on an XScale eval board, I decided to solve this problem in a
generic way.  The changes to make it work are pretty small after all
of the bootstrap API munging I've done over the past couple of days.

Summary:

	* pmap_link_l2pt() now takes a pv_addr_t instead of a paddr_t
	  as a handle for the L2 table.  The pv_addr_t's "pv_va" is
	  filled in with an address that is accessible during early
	  bootstrap.  This is a physical address on most platforms,
	  but on some, it isn't.  pmap_link_l2pt() inserts the L2
	  table into the L1 table, and also places the pv_addr_t on
	  a list of kernel L2 tables.  This list is valid only during
	  bootstrap.

	* pmap_map_chunk() no longer takes an L2 table argument.  Instead,
	  it does the following:

		* If not using a section mapping, checks to make sure
		  there's an L2 table in the L1 slot for the current
		  VA, and panics if there is not (this change is already
		  in the tree).  This is nice, because now if you are going
		  to overflow the L2 table, you get this:

		      panic: pmap_map_chunk: no L2 table for VA 0xa0600000

		  ...rather than just hanging.

		* Using the L2 table PA in the L1 entry, look up the L2
		  table for the current VA, and use that.

...this allows machdep code to plop more L2 tables in the right places
before calling pmap_map_chunk().  In a perfect world, the L2 table allocation
would be fully dynamic, but that's a hairy mess, and I didn't want to try
and attack it right now.

Attached are the diffs to the pmap and the IQ80310 machdep code that
implement this.  I'll make similar changes to the other ports, and commit
it.

-- 
        -- Jason R. Thorpe <thorpej@wasabisystems.com>

--k1lZvvs/B4yU6o8G
Content-Type: text/plain; charset=us-ascii
Content-Description: l2boot.patch
Content-Disposition: attachment; filename=foo

Index: arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.45
diff -c -r1.45 pmap.c
*** arm/arm32/pmap.c	2002/02/21 06:36:11	1.45
--- arm/arm32/pmap.c	2002/02/21 19:52:33
***************
*** 3723,3728 ****
--- 3723,3749 ----
  /************************ Bootstrapping routines ****************************/
  
  /*
+  * This list exists for the benefit of pmap_map_chunk().  It keeps track
+  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
+  * find them as necessary.
+  *
+  * Note that the data on this list is not valid after initarm() returns.
+  */
+ SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
+ 
+ static vaddr_t
+ kernel_pt_lookup(paddr_t pa)
+ {
+ 	pv_addr_t *pv;
+ 
+ 	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
+ 		if (pv->pv_pa == pa)
+ 			return (pv->pv_va);
+ 	}
+ 	return (0);
+ }
+ 
+ /*
   * pmap_map_section:
   *
   *	Create a single section mapping.
***************
*** 3767,3783 ****
   *	page table at the slot for "va".
   */
  void
! pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, paddr_t l2pa)
  {
  	pd_entry_t *pde = (pd_entry_t *) l1pt;
  	u_int slot = va >> PDSHIFT;
  
! 	KASSERT((l2pa & PGOFSET) == 0);
  
! 	pde[slot + 0] = L1_PTE(l2pa + 0x000);
! 	pde[slot + 1] = L1_PTE(l2pa + 0x400);
! 	pde[slot + 2] = L1_PTE(l2pa + 0x800);
! 	pde[slot + 3] = L1_PTE(l2pa + 0xc00);
  }
  
  /*
--- 3788,3806 ----
   *	page table at the slot for "va".
   */
  void
! pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
  {
  	pd_entry_t *pde = (pd_entry_t *) l1pt;
  	u_int slot = va >> PDSHIFT;
+ 
+ 	KASSERT((l2pv->pv_pa & PGOFSET) == 0);
  
! 	pde[slot + 0] = L1_PTE(l2pv->pv_pa + 0x000);
! 	pde[slot + 1] = L1_PTE(l2pv->pv_pa + 0x400);
! 	pde[slot + 2] = L1_PTE(l2pv->pv_pa + 0x800);
! 	pde[slot + 3] = L1_PTE(l2pv->pv_pa + 0xc00);
  
! 	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
  }
  
  /*
***************
*** 3788,3800 ****
   *	provided L1 and L2 tables at the specified virtual address.
   */
  vsize_t
! pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
!     vsize_t size, int prot, int cache)
  {
  	pd_entry_t *pde = (pd_entry_t *) l1pt;
- 	pt_entry_t *pte = (pt_entry_t *) l2pt;
  	pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
  	pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
  	vsize_t resid;  
  	int i;
  
--- 3811,3823 ----
   *	provided L1 and L2 tables at the specified virtual address.
   */
  vsize_t
! pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
!     int prot, int cache)
  {
  	pd_entry_t *pde = (pd_entry_t *) l1pt;
  	pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
  	pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
+ 	pt_entry_t *pte;
  	vsize_t resid;  
  	int i;
  
***************
*** 3830,3837 ****
  		 * for the current VA.
  		 */
  		if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
! 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx\n",
! 			    va);
  
  		/* See if we can use a L2 large page mapping. */
  		if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
--- 3853,3865 ----
  		 * for the current VA.
  		 */
  		if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
! 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
! 
! 		pte = (pt_entry_t *)
! 		    kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
! 		if (pte == NULL)
! 			panic("pmap_map_chunk: can't find L2 table for VA "
! 			    "0x%08lx", va);
  
  		/* See if we can use a L2 large page mapping. */
  		if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
***************
*** 3840,3852 ****
  			printf("L");
  #endif
  			for (i = 0; i < 16; i++) {
- #ifdef cats	/* XXXJRT */
- 				pte[((va >> PGSHIFT) & 0x7f0) + i] =
- 				    L2_LPTE(pa, ap, fl);
- #else
  				pte[((va >> PGSHIFT) & 0x3f0) + i] =
  				    L2_LPTE(pa, ap, fl);
- #endif
  			}
  			va += L2_LPAGE_SIZE;
  			pa += L2_LPAGE_SIZE;
--- 3868,3875 ----
***************
*** 3858,3868 ****
  #ifdef VERBOSE_INIT_ARM
  		printf("P");
  #endif
- #ifdef cats	/* XXXJRT */
- 		pte[(va >> PGSHIFT) & 0x7ff] = L2_SPTE(pa, ap, fl);
- #else
  		pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa, ap, fl);
- #endif
  		va += NBPG;
  		pa += NBPG;
  		resid -= NBPG;
--- 3881,3887 ----
Index: arm/include/arm32/pmap.h
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/arm/include/arm32/pmap.h,v
retrieving revision 1.27
diff -c -r1.27 pmap.h
*** arm/include/arm32/pmap.h	2002/02/21 02:52:21	1.27
--- arm/include/arm32/pmap.h	2002/02/21 19:52:34
***************
*** 123,130 ****
   * entry address for each page hook.
   */
  typedef struct {
!         vaddr_t va;
!         pt_entry_t *pte;
  } pagehook_t;
  
  /*
--- 123,130 ----
   * entry address for each page hook.
   */
  typedef struct {
! 	vaddr_t va;
! 	pt_entry_t *pte;
  } pagehook_t;
  
  /*
***************
*** 132,138 ****
   * during bootstrapping) we need to keep track of the physical and virtual
   * addresses of various pages
   */
! typedef struct {
  	paddr_t pv_pa;
  	vaddr_t pv_va;
  } pv_addr_t;
--- 132,139 ----
   * during bootstrapping) we need to keep track of the physical and virtual
   * addresses of various pages
   */
! typedef struct pv_addr {
! 	SLIST_ENTRY(pv_addr) pv_list;
  	paddr_t pv_pa;
  	vaddr_t pv_va;
  } pv_addr_t;
***************
*** 188,196 ****
  /* Bootstrapping routines. */
  void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
  void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
! vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, vaddr_t, paddr_t, vsize_t,
! 	    int, int);
! void	pmap_link_l2pt(vaddr_t, vaddr_t, paddr_t);
  
  /*
   * Special page zero routine for use by the idle loop (no cache cleans). 
--- 189,196 ----
  /* Bootstrapping routines. */
  void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
  void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
! vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
! void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
  
  /*
   * Special page zero routine for use by the idle loop (no cache cleans). 
Index: evbarm/iq80310/iq80310_machdep.c
===================================================================
RCS file: /cvsroot/syssrc/sys/arch/evbarm/iq80310/iq80310_machdep.c,v
retrieving revision 1.26
diff -c -r1.26 iq80310_machdep.c
*** evbarm/iq80310/iq80310_machdep.c	2002/02/21 05:25:25	1.26
--- evbarm/iq80310/iq80310_machdep.c	2002/02/21 19:52:35
***************
*** 169,175 ****
  #define	KERNEL_PT_VMDATA_NUM	(KERNEL_VM_SIZE >> (PDSHIFT + 2))
  #define NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
  
! pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
  
  struct user *proc0paddr;
  
--- 169,175 ----
  #define	KERNEL_PT_VMDATA_NUM	(KERNEL_VM_SIZE >> (PDSHIFT + 2))
  #define NUM_KERNEL_PTS		(KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
  
! pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
  
  struct user *proc0paddr;
  
***************
*** 525,531 ****
  		    && kernel_l1pt.pv_pa == 0) {
  			valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
  		} else {
! 			alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG);
  			++loop1;
  		}
  	}
--- 525,534 ----
  		    && kernel_l1pt.pv_pa == 0) {
  			valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
  		} else {
! 			alloc_pages(kernel_pt_table[loop1].pv_pa,
! 			    PT_SIZE / NBPG);
! 			kernel_pt_table[loop1].pv_va =
! 			    kernel_pt_table[loop1].pv_pa;
  			++loop1;
  		}
  	}
***************
*** 590,612 ****
  
  	/* Map the L2 pages tables in the L1 page table */
  	pmap_link_l2pt(l1pagetable, 0x00000000,
! 	    kernel_pt_table[KERNEL_PT_SYS]);
  	pmap_link_l2pt(l1pagetable, KERNEL_BASE,
! 	    kernel_pt_table[KERNEL_PT_KERNEL]);
  	pmap_link_l2pt(l1pagetable, IQ80310_IOPXS_VBASE,
! 	    kernel_pt_table[KERNEL_PT_IOPXS]);
  	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
  		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
! 		    kernel_pt_table[KERNEL_PT_VMDATA + loop]);
! 	pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
! 	    kernel_ptpt.pv_pa);
  
  #ifdef VERBOSE_INIT_ARM
  	printf("Mapping kernel\n");
  #endif
  
  	/* Now we fill in the L2 pagetable for the kernel static code/data */
! 	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
  
  	{
  		extern char etext[], _end[];
--- 593,614 ----
  
  	/* Map the L2 pages tables in the L1 page table */
  	pmap_link_l2pt(l1pagetable, 0x00000000,
! 	    &kernel_pt_table[KERNEL_PT_SYS]);
  	pmap_link_l2pt(l1pagetable, KERNEL_BASE,
! 	    &kernel_pt_table[KERNEL_PT_KERNEL]);
  	pmap_link_l2pt(l1pagetable, IQ80310_IOPXS_VBASE,
! 	    &kernel_pt_table[KERNEL_PT_IOPXS]);
  	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
  		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
! 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
! 	pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE, &kernel_ptpt);
  
  #ifdef VERBOSE_INIT_ARM
  	printf("Mapping kernel\n");
  #endif
  
  	/* Now we fill in the L2 pagetable for the kernel static code/data */
! 	l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_va;
  
  	{
  		extern char etext[], _end[];
***************
*** 622,639 ****
  		/*
  		 * This maps the kernel text/data/bss VA==PA.
  		 */
! 		logical += pmap_map_chunk(l1pagetable, l2pagetable,
! 		    KERNEL_BASE + logical,
  		    physical_start + logical, textsize,
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 		logical += pmap_map_chunk(l1pagetable, l2pagetable,
! 		    KERNEL_BASE + logical,
  		    physical_start + logical, totalsize - textsize,
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
  #if 0 /* XXX No symbols yet. */
! 		logical += pmap_map_chunk(l1pagetable, l2pagetable,
! 		    KERNEL_BASE + logical,
  		    physical_start + logical, kernexec->a_syms + sizeof(int)
  		    + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
--- 624,638 ----
  		/*
  		 * This maps the kernel text/data/bss VA==PA.
  		 */
! 		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
  		    physical_start + logical, textsize,
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
  		    physical_start + logical, totalsize - textsize,
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
  #if 0 /* XXX No symbols yet. */
! 		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
  		    physical_start + logical, kernexec->a_syms + sizeof(int)
  		    + *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
***************
*** 645,670 ****
  #endif
  
  	/* Map the stack pages */
! 	pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
! 	    irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
! 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 	pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
! 	    abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
! 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 	pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
! 	    undstack.pv_pa, UND_STACK_SIZE * NBPG,
! 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 	pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
! 	    kernelstack.pv_pa, UPAGES * NBPG,
! 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
! 	pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va,
! 	    kernel_l1pt.pv_pa, PD_SIZE,
! 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
  	/* Map the Mini-Data cache clean area. */
! 	pmap_map_chunk(l1pagetable, l2pagetable, minidataclean.pv_va,
! 	    minidataclean.pv_pa, NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
  	/* Map the page table that maps the kernel pages */
  	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
--- 644,664 ----
  #endif
  
  	/* Map the stack pages */
! 	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
! 	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
! 	    ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
! 	    UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
! 	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
! 	    UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
! 	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
! 	    PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
  	/* Map the Mini-Data cache clean area. */
! 	pmap_map_chunk(l1pagetable, minidataclean.pv_va, minidataclean.pv_pa,
! 	    NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
  	/* Map the page table that maps the kernel pages */
  	pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
***************
*** 677,701 ****
  	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
  	l2pagetable = kernel_ptpt.pv_pa;
  	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
! 	    kernel_pt_table[KERNEL_PT_KERNEL],
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
  	    kernel_ptpt.pv_pa,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
! 	    kernel_pt_table[KERNEL_PT_SYS],
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
  		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
  		    (loop * 0x00400000)) >> (PGSHIFT-2)),
! 		    kernel_pt_table[KERNEL_PT_VMDATA + loop],
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
  	/*
  	 * Map the system page in the kernel page table for the bottom 1Meg
  	 * of the virtual memory map.
  	 */
! 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
  	pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
--- 671,695 ----
  	/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
  	l2pagetable = kernel_ptpt.pv_pa;
  	pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
! 	    kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  	pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
  	    kernel_ptpt.pv_pa,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  	pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
! 	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
  		pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
  		    (loop * 0x00400000)) >> (PGSHIFT-2)),
! 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
  		    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
  	/*
  	 * Map the system page in the kernel page table for the bottom 1Meg
  	 * of the virtual memory map.
  	 */
! 	l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_va;
  	pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  
***************
*** 724,737 ****
  	 * Map the PCI I/O spaces and i80312 registers.  These are too
  	 * small to be mapped w/ section mappings.
  	 */
! 	l2pagetable = kernel_pt_table[KERNEL_PT_IOPXS];
  #ifdef VERBOSE_INIT_ARM
  	printf("Mapping PIOW 0x%08lx -> 0x%08lx @ 0x%08lx\n",
  	    I80312_PCI_XLATE_PIOW_BASE,
  	    I80312_PCI_XLATE_PIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
  	    IQ80310_PIOW_VBASE);
  #endif
! 	pmap_map_chunk(l1pagetable, l2pagetable, IQ80310_PIOW_VBASE,
  	    I80312_PCI_XLATE_PIOW_BASE, I80312_PCI_XLATE_IOSIZE,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
--- 718,731 ----
  	 * Map the PCI I/O spaces and i80312 registers.  These are too
  	 * small to be mapped w/ section mappings.
  	 */
! 	l2pagetable = kernel_pt_table[KERNEL_PT_IOPXS].pv_va;
  #ifdef VERBOSE_INIT_ARM
  	printf("Mapping PIOW 0x%08lx -> 0x%08lx @ 0x%08lx\n",
  	    I80312_PCI_XLATE_PIOW_BASE,
  	    I80312_PCI_XLATE_PIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
  	    IQ80310_PIOW_VBASE);
  #endif
! 	pmap_map_chunk(l1pagetable, IQ80310_PIOW_VBASE,
  	    I80312_PCI_XLATE_PIOW_BASE, I80312_PCI_XLATE_IOSIZE,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
***************
*** 741,747 ****
  	    I80312_PCI_XLATE_SIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
  	    IQ80310_SIOW_VBASE);
  #endif
! 	pmap_map_chunk(l1pagetable, l2pagetable, IQ80310_SIOW_VBASE,
  	    I80312_PCI_XLATE_SIOW_BASE, I80312_PCI_XLATE_IOSIZE,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
--- 735,741 ----
  	    I80312_PCI_XLATE_SIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
  	    IQ80310_SIOW_VBASE);
  #endif
! 	pmap_map_chunk(l1pagetable, IQ80310_SIOW_VBASE,
  	    I80312_PCI_XLATE_SIOW_BASE, I80312_PCI_XLATE_IOSIZE,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
***************
*** 751,757 ****
  	    I80312_PMMR_BASE + I80312_PMMR_SIZE - 1,
  	    IQ80310_80312_VBASE);
  #endif
! 	pmap_map_chunk(l1pagetable, l2pagetable, IQ80310_80312_VBASE,
  	    I80312_PMMR_BASE, I80312_PMMR_SIZE,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  
--- 745,751 ----
  	    I80312_PMMR_BASE + I80312_PMMR_SIZE - 1,
  	    IQ80310_80312_VBASE);
  #endif
! 	pmap_map_chunk(l1pagetable, IQ80310_80312_VBASE,
  	    I80312_PMMR_BASE, I80312_PMMR_SIZE,
  	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
  

--k1lZvvs/B4yU6o8G--