Subject: Re: Simplify pmap->uvm reporting of KVA boundaries
To: Jason Thorpe <thorpej@wasabisystems.com>
From: Jason Thorpe <thorpej@wasabisystems.com>
List: tech-kern
Date: 05/05/2003 14:45:26
--Apple-Mail-3-849482151
Content-Transfer-Encoding: 7bit
Content-Type: text/plain;
	charset=US-ASCII;
	format=flowed


On Sunday, May 4, 2003, at 11:21  AM, Jason Thorpe wrote:

> The attached patch has been tested on evbarm and i386.  I will be 
> testing it on hp300 later today (I actually expect I'll need to delete 
> some additional code from cpu_startup() on a few m68k platforms).

Here is an updated patch.  This has been booted multiuser on an HP380.

Unfortunately, I needed to add a nasty kludge to the pmap_init() 
function in the Hibler-based m68k pmaps.  However, that has more to do 
with the fragility of how those pmaps manage page tables than with this 
change.

         -- Jason R. Thorpe <thorpej@wasabisystems.com>


--Apple-Mail-3-849482151
Content-Disposition: attachment;
	filename=uvm_vspace.diff
Content-Transfer-Encoding: 7bit
Content-Type: application/octet-stream;
	x-unix-mode=0644;
	name="uvm_vspace.diff"

Index: arch/acorn26/acorn26/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/acorn26/acorn26/pmap.c,v
retrieving revision 1.7
diff -u -r1.7 pmap.c
--- arch/acorn26/acorn26/pmap.c	2003/01/17 21:55:24	1.7
+++ arch/acorn26/acorn26/pmap.c	2003/05/05 21:17:11
@@ -237,9 +237,8 @@
  * pmap_bootstrap: first-stage pmap initialisation
  * 
  * This is called very early, and has to get the pmap system into a
- * state where pmap_virtual_space and pmap_kenter_pa at least will
- * work.  If we need memory here, we have to work out how to get it
- * ourselves.
+ * state where pmap_kenter_pa at least will work.  If we need memory
+ * here, we have to work out how to get it ourselves.
  */
 void
 pmap_bootstrap(int npages, paddr_t zp_physaddr)
@@ -261,6 +260,15 @@
 		pv_table[i].pv_pflags |= PV_MODIFIED;
 #endif
 
+	/*
+	 * Define the boundaries of the managed kernel virtual address
+	 * space.  Since NetBSD/acorn26 runs the kernel from physically-
+	 * mapped space, we just return all of kernel vm.  Oh, except for
+	 * the single page at the end where we map otherwise-unmapped pages.
+	 */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;
+	virtual_end = VM_MAX_KERNEL_ADDRESS - PAGE_SIZE;
+
 	/* Set up the kernel's pmap */
 	pmap = pmap_kernel();
 	bzero(pmap, sizeof(*pmap));
@@ -288,7 +296,7 @@
 }
 
 vaddr_t
-pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
+pmap_steal_memory(vsize_t size)
 {
 	int i;
 	vaddr_t addr;
@@ -1096,25 +1104,6 @@
 	MD4Final(digest, &context);
 }
 #endif
-
-/*
- * This is meant to return the range of kernel vm that is available
- * after loading the kernel.  Since NetBSD/acorn26 runs the kernel from
- * physically-mapped space, we just return all of kernel vm.  Oh,
- * except for the single page at the end where we map
- * otherwise-unmapped pages.
- */
-void
-pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
-{
-	UVMHIST_FUNC("pmap_virtual_space");
-
-	UVMHIST_CALLED(pmaphist);
-	if (vstartp != NULL)
-		*vstartp = VM_MIN_KERNEL_ADDRESS;
-	if (vendp != NULL)
-		*vendp = VM_MAX_KERNEL_ADDRESS - PAGE_SIZE;
-}
 
 #ifdef DDB
 #include <ddb/db_output.h>
Index: arch/alpha/alpha/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/alpha/pmap.c,v
retrieving revision 1.197
diff -u -r1.197 pmap.c
--- arch/alpha/alpha/pmap.c	2003/04/01 02:18:52	1.197
+++ arch/alpha/alpha/pmap.c	2003/05/05 21:17:15
@@ -232,7 +232,7 @@
 
 paddr_t    	avail_start;	/* PA of first available physical page */
 paddr_t		avail_end;	/* PA of last available physical page */
-static vaddr_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
+static vaddr_t	pmap_max_kva;	/* VA of last mappable page */
 
 boolean_t	pmap_initialized;	/* Has pmap_init completed? */
 
@@ -365,7 +365,7 @@
  *	  lock is held.
  *
  *	* pmap_growkernel_slock - This lock protects pmap_growkernel()
- *	  and the virtual_end variable.
+ *	  and the pmap_max_kva variable.
  *
  *	  There is a lock ordering constraint for pmap_growkernel_slock.
  *	  pmap_growkernel() acquires the locks in the following order:
@@ -830,12 +830,19 @@
 	 */
 	avail_start = ptoa(vm_physmem[0].start);
 	avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
-	virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE;
+	pmap_max_kva = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE;
 
+	/*
+	 * Define the boundaries of the managed kernel virtual address
+	 * space.
+	 */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;	/* kernel is in K0SEG */
+	virtual_end = VM_MAX_KERNEL_ADDRESS;	/* we use pmap_growkernel */
+
 #if 0
 	printf("avail_start = 0x%lx\n", avail_start);
 	printf("avail_end = 0x%lx\n", avail_end);
-	printf("virtual_end = 0x%lx\n", virtual_end);
+	printf("pmap_max_kva = 0x%lx\n", pmap_max_kva);
 #endif
 
 	/*
@@ -1010,19 +1017,6 @@
 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */
 
 /*
- * pmap_virtual_space:		[ INTERFACE ]
- *
- *	Define the initial bounds of the kernel virtual address space.
- */
-void
-pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
-{
-
-	*vstartp = VM_MIN_KERNEL_ADDRESS;	/* kernel is in K0SEG */
-	*vendp = VM_MAX_KERNEL_ADDRESS;		/* we use pmap_growkernel */
-}
-
-/*
  * pmap_steal_memory:		[ INTERFACE ]
  *
  *	Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
@@ -1040,13 +1034,10 @@
  *	Note that this memory will never be freed, and in essence it is wired
  *	down.
  *
- *	We must adjust *vstartp and/or *vendp iff we use address space
- *	from the kernel virtual address range defined by pmap_virtual_space().
- *
  *	Note: no locking is necessary in this function.
  */
 vaddr_t
-pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
+pmap_steal_memory(vsize_t size)
 {
 	int bank, npgs, x;
 	vaddr_t va;
@@ -3141,12 +3132,12 @@
 	vaddr_t va;
 	int l1idx;
 
-	if (maxkvaddr <= virtual_end)
+	if (maxkvaddr <= pmap_max_kva)
 		goto out;		/* we are OK */
 
 	simple_lock(&pmap_growkernel_slock);
 
-	va = virtual_end;
+	va = pmap_max_kva;
 
 	while (va < maxkvaddr) {
 		/*
@@ -3216,12 +3207,12 @@
 	/* Invalidate the L1 PT cache. */
 	pool_cache_invalidate(&pmap_l1pt_cache);
 
-	virtual_end = va;
+	pmap_max_kva = va;
 
 	simple_unlock(&pmap_growkernel_slock);
 
  out:
-	return (virtual_end);
+	return (pmap_max_kva);
 
  die:
 	panic("pmap_growkernel: out of memory");
Index: arch/amd64/amd64/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/amd64/amd64/pmap.c,v
retrieving revision 1.1
diff -u -r1.1 pmap.c
--- arch/amd64/amd64/pmap.c	2003/04/26 18:39:31	1.1
+++ arch/amd64/amd64/pmap.c	2003/05/05 21:17:19
@@ -405,17 +405,6 @@
 static boolean_t pmap_initialized = FALSE; /* pmap_init done yet? */
 
 /*
- * the following two vaddr_t's are used during system startup
- * to keep track of how much of the kernel's VM space we have used.
- * once the system is started, the management of the remaining kernel
- * VM space is turned over to the kernel_map vm_map.
- */
-
-static vaddr_t virtual_avail;	/* VA of first free KVA */
-static vaddr_t virtual_end;	/* VA of last free KVA */
-
-
-/*
  * pv_page management structures: locked by pvalloc_lock
  */
 
@@ -896,8 +885,8 @@
 	unsigned long p1i;
 
 	/*
-	 * set up our local static global vars that keep track of the
-	 * usage of KVM before kernel_map is set up
+	 * define the boundaries of the managed kernel virtual address
+	 * space.
 	 */
 
 	virtual_avail = kva_start;		/* first free KVA */
@@ -2231,20 +2220,6 @@
 	return (0);
 }
 
-
-/*
- * pmap_virtual_space: used during bootup [pmap_steal_memory] to
- *	determine the bounds of the kernel virtual addess space.
- */
-
-void
-pmap_virtual_space(startp, endp)
-	vaddr_t *startp;
-	vaddr_t *endp;
-{
-	*startp = virtual_avail;
-	*endp = virtual_end;
-}
 
 /*
  * pmap_map: map a range of PAs into kvm
Index: arch/amiga/amiga/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/amiga/amiga/pmap.c,v
retrieving revision 1.104
diff -u -r1.104 pmap.c
--- arch/amiga/amiga/pmap.c	2003/04/01 21:26:27	1.104
+++ arch/amiga/amiga/pmap.c	2003/05/05 21:17:21
@@ -286,8 +286,6 @@
 struct vm_map	pt_map_store;
 
 vsize_t		mem_size;	/* memory size in bytes */
-vaddr_t		virtual_avail;  /* VA of first avail page (after kernel bss)*/
-vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
 int		page_cnt;	/* number of pages managed by the VM system */
 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
 char		*pmap_attributes;	/* reference and modify bits */
@@ -552,56 +550,6 @@
 #endif
 
 	/*
-	 * Allocate memory for random pmap data structures.  Includes the
-	 * initial segment table, pv_head_table and pmap_attributes.
-	 */
-	for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
-		page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
-#ifdef DEBUG
-		printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank,
-		    vm_physmem[bank].start << PGSHIFT,
-		    vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT);
-#endif
-	}
-	s = AMIGA_STSIZE;				/* Segtabzero */
-	s += page_cnt * sizeof(struct pv_entry);	/* pv table */
-	s += page_cnt * sizeof(char);			/* attribute table */
-	s = round_page(s);
-
-	addr = uvm_km_zalloc(kernel_map, s);
-	if (addr == 0)
-		panic("pmap_init: can't allocate data structures");
-	Segtabzero = (u_int *) addr;
-	(void) pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
-	addr += AMIGA_STSIZE;
-
-	pv_table = (pv_entry_t) addr;
-	addr += page_cnt * sizeof(struct pv_entry);
-
-	pmap_attributes = (char *) addr;
-#ifdef DEBUG
-	if (pmapdebug & PDB_INIT)
-		printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
-			"tbl %p atr %p\n",
-			s, page_cnt, Segtabzero, Segtabzeropa,
-			pv_table, pmap_attributes);
-#endif
-
-	/*
-	 * Now that the pv and attribute tables have been allocated,
-	 * assign them to the memory segments.
-	 */
-	pv = pv_table;
-	attr = pmap_attributes;
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		npg = vm_physmem[bank].end - vm_physmem[bank].start;
-		vm_physmem[bank].pmseg.pvent = pv;
-		vm_physmem[bank].pmseg.attrs = attr;
-		pv += npg;
-		attr += npg;
-	}
-
-	/*
 	 * Allocate physical memory for kernel PT pages and their management.
 	 * we need enough pages to map the page tables for each process
 	 * plus some slop.
@@ -622,7 +570,8 @@
 	 * Verify that space will be allocated in region for which
 	 * we already have kernel PT pages.
 	 */
-	addr = 0;
+	addr = vm_map_min(kernel_map);
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
 		     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
 				 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
@@ -634,6 +583,7 @@
 	 * Now allocate the space and link the pages together to
 	 * form the KPT free list.
 	 */
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	addr = uvm_km_zalloc(kernel_map, s);
 	if (addr == 0)
 		panic("pmap_init: cannot allocate KPT free list");
@@ -658,6 +608,57 @@
 #endif
 
 	/*
+	 * Allocate memory for random pmap data structures.  Includes the
+	 * initial segment table, pv_head_table and pmap_attributes.
+	 */
+	for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
+		page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
+#ifdef DEBUG
+		printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank,
+		    vm_physmem[bank].start << PGSHIFT,
+		    vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT);
+#endif
+	}
+	s = AMIGA_STSIZE;				/* Segtabzero */
+	s += page_cnt * sizeof(struct pv_entry);	/* pv table */
+	s += page_cnt * sizeof(char);			/* attribute table */
+	s = round_page(s);
+
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
+	addr = uvm_km_zalloc(kernel_map, s);
+	if (addr == 0)
+		panic("pmap_init: can't allocate data structures");
+	Segtabzero = (u_int *) addr;
+	(void) pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
+	addr += AMIGA_STSIZE;
+
+	pv_table = (pv_entry_t) addr;
+	addr += page_cnt * sizeof(struct pv_entry);
+
+	pmap_attributes = (char *) addr;
+#ifdef DEBUG
+	if (pmapdebug & PDB_INIT)
+		printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+			"tbl %p atr %p\n",
+			s, page_cnt, Segtabzero, Segtabzeropa,
+			pv_table, pmap_attributes);
+#endif
+
+	/*
+	 * Now that the pv and attribute tables have been allocated,
+	 * assign them to the memory segments.
+	 */
+	pv = pv_table;
+	attr = pmap_attributes;
+	for (bank = 0; bank < vm_nphysseg; bank++) {
+		npg = vm_physmem[bank].end - vm_physmem[bank].start;
+		vm_physmem[bank].pmseg.pvent = pv;
+		vm_physmem[bank].pmseg.attrs = attr;
+		pv += npg;
+		attr += npg;
+	}
+
+	/*
 	 * Allocate the segment table map and the page table map.
 	 */
 	addr = amiga_uptbase;
@@ -674,6 +675,7 @@
 	} else
 		s = maxproc * AMIGA_UPTSIZE;
 
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
 	    TRUE, &pt_map_store);
 
@@ -2745,23 +2747,6 @@
 		       str, va, pg->wire_count, count);
 }
 #endif
-
-/*
- *	Routine:	pmap_virtual_space
- *
- *	Function:
- *		Report the range of available kernel virtual address
- *		space to the VM system during bootstrap.  Called by
- *		vm_bootstrap_steal_memory().
- */
-void
-pmap_virtual_space(vstartp, vendp)
-	vaddr_t     *vstartp, *vendp;
-{
-
-	*vstartp = virtual_avail;
-	*vendp = virtual_end;
-}
 
 /*
  *	Routine:	pmap_procwr
Index: arch/arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.131
diff -u -r1.131 pmap.c
--- arch/arm/arm32/pmap.c	2003/04/22 00:24:49	1.131
+++ arch/arm/arm32/pmap.c	2003/05/05 21:17:25
@@ -312,8 +312,6 @@
 extern paddr_t physical_end;
 extern int max_processes;
 
-vaddr_t virtual_avail;
-vaddr_t virtual_end;
 vaddr_t pmap_curmaxkvaddr;
 
 vaddr_t avail_start;
@@ -1013,6 +1011,10 @@
 	pmap_kernel()->pm_obj.uo_npages = 0;
 	pmap_kernel()->pm_obj.uo_refs = 1;
 
+	/*
+	 * Define the boundaries of the managed kernel virtual address
+	 * space.
+	 */
 	virtual_avail = KERNEL_VM_BASE;
 	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
 
@@ -1639,21 +1641,6 @@
 	simple_lock(&pmap->pm_lock);
 	pmap->pm_obj.uo_refs++;
 	simple_unlock(&pmap->pm_lock);
-}
-
-/*
- * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
- *
- * Return the start and end addresses of the kernel's virtual space.
- * These values are setup in pmap_bootstrap and are updated as pages
- * are allocated.
- */
-
-void
-pmap_virtual_space(vaddr_t *start, vaddr_t *end)
-{
-	*start = virtual_avail;
-	*end = virtual_end;
 }
 
 /*
Index: arch/arm/arm32/pmap_new.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap_new.c,v
retrieving revision 1.10
diff -u -r1.10 pmap_new.c
--- arch/arm/arm32/pmap_new.c	2003/05/03 16:18:57	1.10
+++ arch/arm/arm32/pmap_new.c	2003/05/05 21:17:29
@@ -527,8 +527,6 @@
 /*
  * Misc variables
  */
-vaddr_t virtual_avail;
-vaddr_t virtual_end;
 vaddr_t pmap_curmaxkvaddr;
 
 vaddr_t avail_start;
@@ -3445,20 +3443,6 @@
 #endif /* ARM_MMU_XSCALE == 1 */
 
 /*
- * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
- *
- * Return the start and end addresses of the kernel's virtual space.
- * These values are setup in pmap_bootstrap and are updated as pages
- * are allocated.
- */
-void
-pmap_virtual_space(vaddr_t *start, vaddr_t *end)
-{
-	*start = virtual_avail;
-	*end = virtual_end;
-}
-
-/*
  * Helper function for pmap_grow_l2_bucket()
  */
 static __inline int
@@ -3913,6 +3897,9 @@
 	 * virtual_avail (note that there are no pages mapped at these VAs).
 	 *
 	 * Managed KVM space start from wherever initarm() tells us.
+	 *
+	 * Note that virtual_avail and virtual_end define the boundaries
+	 * of the managed kernel virtual address space.
 	 */
 	virtual_avail = vstart;
 	virtual_end = vend;
Index: arch/atari/atari/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/atari/atari/machdep.c,v
retrieving revision 1.122
diff -u -r1.122 machdep.c
--- arch/atari/atari/machdep.c	2003/04/26 11:05:08	1.122
+++ arch/atari/atari/machdep.c	2003/05/05 21:17:30
@@ -190,7 +190,6 @@
 void
 cpu_startup()
 {
-	extern	 void		etext __P((void));
 	extern	 int		iomem_malloc_safe;
 		 caddr_t	v;
 		 u_int		i, base, residual;
@@ -293,26 +292,6 @@
 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
 				 FALSE, NULL);
-
-	/*
-	 * Tell the VM system that page 0 isn't mapped.
-	 *
-	 * XXX This is bogus; should just fix KERNBASE and
-	 * XXX VM_MIN_KERNEL_ADDRESS, but not right now.
-	 */
-	if (uvm_map_protect(kernel_map, 0, PAGE_SIZE, UVM_PROT_NONE, TRUE) != 0)
-		panic("can't mark page 0 off-limits");
-
-	/*
-	 * Tell the VM system that writing to kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 *
-	 * XXX Should be m68k_trunc_page(&kernel_text) instead
-	 * XXX of PAGE_SIZE.
-	 */
-	if (uvm_map_protect(kernel_map, PAGE_SIZE, m68k_round_page(&etext),
-	    UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
-		panic("can't protect kernel text");
 
 #ifdef DEBUG
 	pmapdebug = opmapdebug;
Index: arch/atari/atari/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/atari/atari/pmap.c,v
retrieving revision 1.81
diff -u -r1.81 pmap.c
--- arch/atari/atari/pmap.c	2003/04/01 23:47:02	1.81
+++ arch/atari/atari/pmap.c	2003/05/05 21:17:33
@@ -1,3 +1,4 @@
+/*	$NetBSD$	*/
 
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -282,8 +283,6 @@
 
 vsize_t		mem_size;	/* memory size in bytes */
 paddr_t		avail_end;	/* PA of last available physical page */
-vaddr_t		virtual_avail;  /* VA of first avail page (after kernel bss)*/
-vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
 int		page_cnt;	/* number of pages managed by the VM system */
 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
 char		*pmap_attributes;	/* reference and modify bits */
@@ -505,55 +504,6 @@
 #endif
 
 	/*
-	 * Allocate memory for random pmap data structures.  Includes the
-	 * initial segment table, pv_head_table and pmap_attributes.
-	 */
-	for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
-		page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
-#ifdef DEBUG
-		printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank,
-		    vm_physmem[bank].start << PGSHIFT,
-		    vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT);
-#endif
-	}
-	s = ATARI_STSIZE;				/* Segtabzero	   */
-	s += page_cnt * sizeof(struct pv_entry);	/* pv table	   */
-	s += page_cnt * sizeof(char);			/* attribute table */
-	s = round_page(s);
-
-	addr = uvm_km_zalloc(kernel_map, s);
-	if (addr == 0)
-		panic("pmap_init: can't allocate data structures");
-	Segtabzero   = (u_int *) addr;
-	(void) pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
-	addr += ATARI_STSIZE;
-	pv_table = (pv_entry_t) addr;
-	addr += page_cnt * sizeof(struct pv_entry);
-
-	pmap_attributes = (char *) addr;
-#ifdef DEBUG
-	if (pmapdebug & PDB_INIT)
-		printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
-			"tbl %p atr %p\n",
-			s, page_cnt, Segtabzero, Segtabzeropa,
-			pv_table, pmap_attributes);
-#endif
-
-	/*
-	 * Now that the pv and attribute tables have been allocated,
-	 * assign them to the memory segments.
-	 */
-	pv = pv_table;
-	attr = pmap_attributes;
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		npg = vm_physmem[bank].end - vm_physmem[bank].start;
-		vm_physmem[bank].pmseg.pvent = pv;
-		vm_physmem[bank].pmseg.attrs = attr;
-		pv += npg;
-		attr += npg;
-	}
-
-	/*
 	 * Allocate physical memory for kernel PT pages and their management.
 	 * we need enough pages to map the page tables for each process 
 	 * plus some slop.
@@ -574,7 +524,8 @@
 	 * Verify that space will be allocated in region for which
 	 * we already have kernel PT pages.
 	 */
-	addr = 0;
+	addr = vm_map_min(kernel_map);
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
 		     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
 				 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
@@ -610,6 +561,56 @@
 #endif
 
 	/*
+	 * Allocate memory for random pmap data structures.  Includes the
+	 * initial segment table, pv_head_table and pmap_attributes.
+	 */
+	for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
+		page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
+#ifdef DEBUG
+		printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank,
+		    vm_physmem[bank].start << PGSHIFT,
+		    vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT);
+#endif
+	}
+	s = ATARI_STSIZE;				/* Segtabzero	   */
+	s += page_cnt * sizeof(struct pv_entry);	/* pv table	   */
+	s += page_cnt * sizeof(char);			/* attribute table */
+	s = round_page(s);
+
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
+	addr = uvm_km_zalloc(kernel_map, s);
+	if (addr == 0)
+		panic("pmap_init: can't allocate data structures");
+	Segtabzero   = (u_int *) addr;
+	(void) pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
+	addr += ATARI_STSIZE;
+	pv_table = (pv_entry_t) addr;
+	addr += page_cnt * sizeof(struct pv_entry);
+
+	pmap_attributes = (char *) addr;
+#ifdef DEBUG
+	if (pmapdebug & PDB_INIT)
+		printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+			"tbl %p atr %p\n",
+			s, page_cnt, Segtabzero, Segtabzeropa,
+			pv_table, pmap_attributes);
+#endif
+
+	/*
+	 * Now that the pv and attribute tables have been allocated,
+	 * assign them to the memory segments.
+	 */
+	pv = pv_table;
+	attr = pmap_attributes;
+	for (bank = 0; bank < vm_nphysseg; bank++) {
+		npg = vm_physmem[bank].end - vm_physmem[bank].start;
+		vm_physmem[bank].pmseg.pvent = pv;
+		vm_physmem[bank].pmseg.attrs = attr;
+		pv += npg;
+		attr += npg;
+	}
+
+	/*
 	 * Slightly modified version of kmem_suballoc() to get page table
 	 * map where we want it.
 	 */
@@ -627,6 +628,7 @@
 	}
 	else s = maxproc * ATARI_UPTSIZE;
 
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
 	    TRUE, &pt_map_store);
 
@@ -2633,23 +2635,6 @@
 		TBIAU();
 	pmap->pm_ptpages++;
 	splx(s);
-}
-
-/*
- *	Routine:	pmap_virtual_space
- *
- *	Function:
- *		Report the range of available kernel virtual address
- *		space to the VM system during bootstrap.  Called by
- *		vm_bootstrap_steal_memory().
- */
-void
-pmap_virtual_space(vstartp, vendp)
-	vaddr_t     *vstartp, *vendp;
-{
-
-	*vstartp = virtual_avail;
-	*vendp = virtual_end;
 }
 
 /*
Index: arch/cesfic/cesfic/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/cesfic/cesfic/machdep.c,v
retrieving revision 1.15
diff -u -r1.15 machdep.c
--- arch/cesfic/cesfic/machdep.c	2003/04/26 11:05:09	1.15
+++ arch/cesfic/cesfic/machdep.c	2003/05/05 21:17:34
@@ -255,7 +255,6 @@
 void
 cpu_startup()
 {
-	extern char *etext;
 	caddr_t v;
 	int i, base, residual;
 	vaddr_t minaddr, maxaddr;
@@ -353,14 +352,6 @@
 	printf("avail mem = %ld\n", ptoa(uvmexp.free));
 	printf("using %u buffers containing %d bytes of memory\n",
 		nbuf, bufpages * PAGE_SIZE);
-
-	/*
-	 * Tell the VM system that writing to kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 */
-	if (uvm_map_protect(kernel_map, KERNBASE, m68k_round_page(&etext),
-	    UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
-		panic("can't protect kernel text");
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
Index: arch/hp300/hp300/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hp300/hp300/machdep.c,v
retrieving revision 1.166
diff -u -r1.166 machdep.c
--- arch/hp300/hp300/machdep.c	2003/04/26 11:05:23	1.166
+++ arch/hp300/hp300/machdep.c	2003/05/05 21:17:39
@@ -285,7 +285,6 @@
 void
 cpu_startup()
 {
-	extern char *etext;
 	caddr_t v;
 	u_int i, base, residual;
 	vaddr_t minaddr, maxaddr;
@@ -391,26 +390,6 @@
 	printf("avail memory = %s\n", pbuf);
 	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
-
-	/*
-	 * Tell the VM system that page 0 isn't mapped.
-	 *
-	 * XXX This is bogus; should just fix KERNBASE and
-	 * XXX VM_MIN_KERNEL_ADDRESS, but not right now.
-	 */
-	if (uvm_map_protect(kernel_map, 0, PAGE_SIZE, UVM_PROT_NONE, TRUE) != 0)
-		panic("can't mark page 0 off-limits");
-
-	/*
-	 * Tell the VM system that writing to kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 *
-	 * XXX Should be m68k_trunc_page(&kernel_text) instead
-	 * XXX of PAGE_SIZE.
-	 */
-	if (uvm_map_protect(kernel_map, PAGE_SIZE, m68k_round_page(&etext),
-	    UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
-		panic("can't protect kernel text");
 
 	/*
 	 * Set up CPU-specific registers, cache, etc.
Index: arch/hppa/hppa/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/hppa/pmap.c,v
retrieving revision 1.6
diff -u -r1.6 pmap.c
--- arch/hppa/hppa/pmap.c	2003/04/01 20:50:12	1.6
+++ arch/hppa/hppa/pmap.c	2003/05/05 21:17:43
@@ -235,7 +235,7 @@
 #endif
 #define PMAP_PRINTF(v,x) PMAP_PRINTF_MASK(v,v,x)
 
-vaddr_t	virtual_steal, virtual_start, virtual_end;
+vaddr_t	virtual_steal;
 
 /* These two virtual pages are available for copying and zeroing. */
 static vaddr_t tmp_vpages[2];
@@ -1054,7 +1054,7 @@
 	 * virtual space.
 	 */
 	*vstart = btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
-	virtual_start = *vstart;
+	virtual_avail = *vstart;
 
 	/*
 	 * Finally, load physical pages into UVM.  There are
@@ -1106,27 +1106,19 @@
  *	directly mapped cannot be grown dynamically once allocated.
  */
 vaddr_t
-pmap_steal_memory(size, startp, endp)
+pmap_steal_memory(size)
 	vsize_t size;
-	vaddr_t *startp;
-	vaddr_t *endp;
 {
 	vaddr_t va;
 	int lcv;
 
-	PMAP_PRINTF(PDB_STEAL, ("(%lx, %p, %p)\n", size, startp, endp));
+	PMAP_PRINTF(PDB_STEAL, ("(%lx)\n", size));
 
-	/* Remind the caller of the start and end of virtual space. */
-	if (startp)
-		*startp = virtual_start;
-	if (endp)
-		*endp = virtual_end;
-
 	/* Round the allocation up to a page. */
 	size = hppa_round_page(size);
 
 	/* We must panic if we cannot steal the memory. */
-	if (size > virtual_start - virtual_steal)
+	if (size > virtual_avail - virtual_steal)
 		panic("pmap_steal_memory: out of memory");
 
 	/* Steal the memory. */
@@ -1143,17 +1135,6 @@
 		panic("pmap_steal_memory inconsistency");
 
 	return va;
-}
-
-/* 
- * How much virtual space does this kernel have?
- * (After mapping kernel text, data, etc.)
- */
-void
-pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
-{
-	*vstartp = virtual_start;
-	*vendp = virtual_end;
 }
 
 /*
Index: arch/i386/i386/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/i386/i386/pmap.c,v
retrieving revision 1.151
diff -u -r1.151 pmap.c
--- arch/i386/i386/pmap.c	2003/04/01 20:54:23	1.151
+++ arch/i386/i386/pmap.c	2003/05/05 21:17:47
@@ -361,17 +361,6 @@
 static boolean_t pmap_initialized = FALSE; /* pmap_init done yet? */
 
 /*
- * the following two vaddr_t's are used during system startup
- * to keep track of how much of the kernel's VM space we have used.
- * once the system is started, the management of the remaining kernel
- * VM space is turned over to the kernel_map vm_map.
- */
-
-static vaddr_t virtual_avail;	/* VA of first free KVA */
-static vaddr_t virtual_end;	/* VA of last free KVA */
-
-
-/*
  * pv_page management structures: locked by pvalloc_lock
  */
 
@@ -820,8 +809,8 @@
 	int i;
 
 	/*
-	 * set up our local static global vars that keep track of the
-	 * usage of KVM before kernel_map is set up
+	 * define the boundaries of the managed kernel virtual address
+	 * space.
 	 */
 
 	virtual_avail = kva_start;		/* first free KVA */
@@ -958,7 +947,7 @@
 	pte += X86_MAXPROCS * NPTECL;
 #else
 	csrcp = (caddr_t) virtual_avail;  csrc_pte = pte;  /* allocate */
-	virtual_avail += PAGE_SIZE; pte++;			     /* advance */
+	virtual_avail += PAGE_SIZE; pte++;		   /* advance */
 
 	cdstp = (caddr_t) virtual_avail;  cdst_pte = pte;
 	virtual_avail += PAGE_SIZE; pte++;
@@ -1978,20 +1967,6 @@
 	return (0);
 }
 
-
-/*
- * pmap_virtual_space: used during bootup [pmap_steal_memory] to
- *	determine the bounds of the kernel virtual addess space.
- */
-
-void
-pmap_virtual_space(startp, endp)
-	vaddr_t *startp;
-	vaddr_t *endp;
-{
-	*startp = virtual_avail;
-	*endp = virtual_end;
-}
 
 /*
  * pmap_map: map a range of PAs into kvm
Index: arch/luna68k/luna68k/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/luna68k/luna68k/machdep.c,v
retrieving revision 1.28
diff -u -r1.28 machdep.c
--- arch/luna68k/luna68k/machdep.c	2003/04/26 11:05:14	1.28
+++ arch/luna68k/luna68k/machdep.c	2003/05/05 21:17:48
@@ -101,7 +101,6 @@
 struct cpu_info cpu_info_store;
 
 extern char kernel_text[];
-extern char etext[];
 
 struct vm_map *exec_map = NULL;  
 struct vm_map *mb_map = NULL;
@@ -347,26 +346,6 @@
 	printf("avail memory = %s\n", pbuf);
 	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
-
-	/*
-	 * Tell the VM system that the area before the text segment
-	 * is invalid.
-	 *
-	 * XXX Should just change KERNBASE and VM_MIN_KERNEL_ADDRESS,
-	 * XXX but not right now.
-	 */
-	if (uvm_map_protect(kernel_map, 0, round_page((vaddr_t)&kernel_text),
-	    UVM_PROT_NONE, TRUE) != 0)
-		panic("can't mark pre-text pages off-limits");
-
-	/*
-	 * Tell the VM system that writing to kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 */
-	if (uvm_map_protect(kernel_map, trunc_page((vaddr_t)&kernel_text),
-	    trunc_page((vaddr_t)&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE)
-	    != 0)
-		panic("can't protect kernel text");
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
Index: arch/m68k/m68k/pmap_motorola.c
===================================================================
RCS file: /cvsroot/src/sys/arch/m68k/m68k/pmap_motorola.c,v
retrieving revision 1.3
diff -u -r1.3 pmap_motorola.c
--- arch/m68k/m68k/pmap_motorola.c	2003/04/02 00:00:46	1.3
+++ arch/m68k/m68k/pmap_motorola.c	2003/05/05 21:17:51
@@ -256,8 +256,6 @@
 paddr_t		avail_start;	/* PA of first available physical page */
 paddr_t		avail_end;	/* PA of last available physical page */
 vsize_t		mem_size;	/* memory size in bytes */
-vaddr_t		virtual_avail;  /* VA of first avail page (after kernel bss)*/
-vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
 int		page_cnt;	/* number of pages managed by VM system */
 
 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
@@ -331,26 +329,6 @@
 #define	PRM_KEEPPTPAGE	0x04
 
 /*
- * pmap_virtual_space:		[ INTERFACE ]
- *
- *	Report the range of available kernel virtual address
- *	space to the VM system during bootstrap.
- *
- *	This is only an interface function if we do not use
- *	pmap_steal_memory()!
- *
- *	Note: no locking is necessary in this function.
- */
-void
-pmap_virtual_space(vstartp, vendp)
-	vaddr_t	*vstartp, *vendp;
-{
-
-	*vstartp = virtual_avail;
-	*vendp = virtual_end;
-}
-
-/*
  * pmap_init:			[ INTERFACE ]
  *
  *	Initialize the pmap module.  Called by vm_init(), to initialize any
@@ -407,6 +385,53 @@
 	    avail_start, avail_end, virtual_avail, virtual_end));
 
 	/*
+	 * Allocate physical memory for kernel PT pages and their management.
+	 * We need 1 PT page per possible task plus some slop.
+	 */
+	npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
+	s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
+
+	/*
+	 * Verify that space will be allocated in region for which
+	 * we already have kernel PT pages.
+	 */
+	addr = vm_map_min(kernel_map);
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
+	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
+		     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+				 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+	if (rv != 0)
+		panic("pmap_init: uvm_map of KPT array failed: %d", rv);
+	if ((addr + s) >= (vaddr_t)Sysmap)
+		panic("pmap_init: initial kernel PT too small for KPT array "
+		    "(0x%lx - 0x%lx)", addr, s);
+	uvm_unmap(kernel_map, addr, addr + s);
+
+	/*
+	 * Now allocate the space and link the pages together to
+	 * form the KPT free list.
+	 */
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
+	addr = uvm_km_zalloc(kernel_map, s);
+	if (addr == 0)
+		panic("pmap_init: cannot allocate KPT free list");
+	s = ptoa(npages);
+	addr2 = addr + s;
+	kpt_pages = &((struct kpt_page *)addr2)[npages];
+	kpt_free_list = NULL;
+	do {
+		addr2 -= PAGE_SIZE;
+		(--kpt_pages)->kpt_next = kpt_free_list;
+		kpt_free_list = kpt_pages;
+		kpt_pages->kpt_va = addr2;
+		(void) pmap_extract(pmap_kernel(), addr2,
+		    (paddr_t *)&kpt_pages->kpt_pa);
+	} while (addr != addr2);
+
+	PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
+	    atop(s), addr, addr + s));
+
+	/*
 	 * Allocate memory for random pmap data structures.  Includes the
 	 * initial segment table, pv_head_table and pmap_attributes.
 	 */
@@ -416,6 +441,7 @@
 	s += page_cnt * sizeof(struct pv_entry);	/* pv table */
 	s += page_cnt * sizeof(char);			/* attribute table */
 	s = round_page(s);
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	addr = uvm_km_zalloc(kernel_map, s);
 	if (addr == 0)
 		panic("pmap_init: can't allocate data structures");
@@ -449,51 +475,10 @@
 	}
 
 	/*
-	 * Allocate physical memory for kernel PT pages and their management.
-	 * We need 1 PT page per possible task plus some slop.
-	 */
-	npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
-	s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
-
-	/*
-	 * Verify that space will be allocated in region for which
-	 * we already have kernel PT pages.
-	 */
-	addr = 0;
-	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
-		     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
-				 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
-	if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
-		panic("pmap_init: kernel PT too small");
-	uvm_unmap(kernel_map, addr, addr + s);
-
-	/*
-	 * Now allocate the space and link the pages together to
-	 * form the KPT free list.
-	 */
-	addr = uvm_km_zalloc(kernel_map, s);
-	if (addr == 0)
-		panic("pmap_init: cannot allocate KPT free list");
-	s = ptoa(npages);
-	addr2 = addr + s;
-	kpt_pages = &((struct kpt_page *)addr2)[npages];
-	kpt_free_list = NULL;
-	do {
-		addr2 -= PAGE_SIZE;
-		(--kpt_pages)->kpt_next = kpt_free_list;
-		kpt_free_list = kpt_pages;
-		kpt_pages->kpt_va = addr2;
-		(void) pmap_extract(pmap_kernel(), addr2,
-		    (paddr_t *)&kpt_pages->kpt_pa);
-	} while (addr != addr2);
-
-	PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
-	    atop(s), addr, addr + s));
-
-	/*
 	 * Allocate the segment table map and the page table map.
 	 */
 	s = maxproc * M68K_STSIZE;
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
 	    &st_map_store);
 
@@ -509,6 +494,7 @@
 		maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
 	} else
 		s = (maxproc * M68K_MAX_PTSIZE);
+	kernel_map->first_free = &kernel_map->header;	/* XXX */
 	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
 	    TRUE, &pt_map_store);
 
Index: arch/mac68k/mac68k/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mac68k/mac68k/machdep.c,v
retrieving revision 1.286
diff -u -r1.286 machdep.c
--- arch/mac68k/mac68k/machdep.c	2003/04/26 11:05:14	1.286
+++ arch/mac68k/mac68k/machdep.c	2003/05/05 21:17:54
@@ -382,8 +382,6 @@
 void
 cpu_startup(void)
 {
-	extern char *start;
-	extern char *etext;
 	caddr_t v;
 	int vers;
 	u_int i, base, residual;
@@ -495,19 +493,6 @@
 	printf("avail memory = %s\n", pbuf);
 	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
-
-	/*
-	 * Tell the VM system that writing to kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 *
-	 * XXX I'd like this to be m68k_trunc_page(&kernel_text) instead
-	 * XXX of the reference to &start, but we have to keep the
-	 * XXX interrupt vectors and such writable for the Mac toolbox.
-	 */
-	if (uvm_map_protect(kernel_map,
-	    m68k_trunc_page(&start + (PAGE_SIZE - 1)), m68k_round_page(&etext),
-	    (UVM_PROT_READ | UVM_PROT_EXEC), TRUE) != 0)
-		panic("can't protect kernel text");
 
 	/*
 	 * Set up CPU-specific registers, cache, etc.
Index: arch/mips/mips/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/pmap.c,v
retrieving revision 1.146
diff -u -r1.146 pmap.c
--- arch/mips/mips/pmap.c	2003/01/17 23:36:17	1.146
+++ arch/mips/mips/pmap.c	2003/05/05 21:17:56
@@ -190,7 +190,6 @@
 
 paddr_t avail_start;	/* PA of first available physical page */
 paddr_t avail_end;	/* PA of last available physical page */
-vaddr_t virtual_end;	/* VA of last avail page (end of kernel AS) */
 
 struct pv_entry	*pv_table;
 int		 pv_table_npages;
@@ -331,6 +330,13 @@
 	 */
 	avail_start = ptoa(vm_physmem[0].start);
 	avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
+
+	/*
+	 * Note: we can't grow the kernel pmap, so the end of the
+	 * managed kernel virtual address space is defined by how
+	 * large we make the initial sysmap.
+	 */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;
 	virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;
 
 	/*
@@ -394,17 +400,6 @@
 }
 
 /*
- * Define the initial bounds of the kernel virtual address space.
- */
-void
-pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
-{
-
-	*vstartp = VM_MIN_KERNEL_ADDRESS;	/* kernel is in K0SEG */
-	*vendp = trunc_page(virtual_end);	/* XXX need pmap_growkernel() */
-}
-
-/*
  * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
  * This function allows for early dynamic memory allocation until the virtual
  * memory system has been bootstrapped.  After that point, either kmem_alloc
@@ -419,14 +414,10 @@
  *
  * Note that this memory will never be freed, and in essence it is wired
  * down.
- *
- * We must adjust *vstartp and/or *vendp iff we use address space
- * from the kernel virtual address range defined by pmap_virtual_space().
  */
 vaddr_t
-pmap_steal_memory(size, vstartp, vendp)
+pmap_steal_memory(size)
 	vsize_t size;
-	vaddr_t *vstartp, *vendp;
 {
 	int bank, x;
 	u_int npgs;
Index: arch/mvme68k/mvme68k/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mvme68k/mvme68k/machdep.c,v
retrieving revision 1.98
diff -u -r1.98 machdep.c
--- arch/mvme68k/mvme68k/machdep.c	2003/04/26 11:05:16	1.98
+++ arch/mvme68k/mvme68k/machdep.c	2003/05/05 21:17:58
@@ -430,7 +430,6 @@
 void
 cpu_startup()
 {
-	extern char *kernel_text, *etext;
 	caddr_t v;
 	u_int i, base, residual;
 	u_quad_t vmememsize;
@@ -551,26 +550,6 @@
 	printf("avail memory = %s\n", pbuf);
 	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
-
-	/*
-	 * Tell the VM system that the area before the text segment
-	 * is invalid.
-	 *
-	 * XXX Should just change KERNBASE and VM_MIN_KERNEL_ADDRESS,
-	 * XXX but not right now.
-	 */
-	if (uvm_map_protect(kernel_map, 0, round_page((vaddr_t)&kernel_text),
-	    UVM_PROT_NONE, TRUE) != 0)
-		panic("can't mark pre-text pages off-limits");
-
-	/*
-	 * Tell the VM system that writing to the kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 */
-	if (uvm_map_protect(kernel_map, trunc_page((vaddr_t)&kernel_text),
-	    round_page((vaddr_t)&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE)
-	    != 0)
-		panic("can't protect kernel text");
 
 	/*
 	 * Set up CPU-specific registers, cache, etc.
Index: arch/news68k/news68k/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/news68k/news68k/machdep.c,v
retrieving revision 1.38
diff -u -r1.38 machdep.c
--- arch/news68k/news68k/machdep.c	2003/04/26 11:05:17	1.38
+++ arch/news68k/news68k/machdep.c	2003/05/05 21:17:59
@@ -117,7 +117,6 @@
 int	safepri = PSL_LOWIPL;
 
 extern paddr_t avail_start, avail_end;
-extern char *kernel_text, *etext;
 extern int end, *esym;
 extern u_int lowram;
 
@@ -313,25 +312,6 @@
 	printf("avail memory = %s\n", pbuf);
 	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
-
-	/*
-	 * Tell the VM system that the area before the text segment
-	 * is invalid.
-	 *
-	 * XXX This is bogus; should just fix KERNBASE and
-	 * XXX VM_MIN_KERNEL_ADDRESS, but not right now.
-	 */
-	if (uvm_map_protect(kernel_map, 0, m68k_round_page(&kernel_text),
-	    UVM_PROT_NONE, TRUE) != 0)
-		panic("can't mark pre-text pages off-limits");
-
-	/*
-	 * Tell the VM system that writing to the kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 */
-	if (uvm_map_protect(kernel_map, m68k_trunc_page(&kernel_text),
-	    m68k_round_page(&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
-		panic("can't protect kernel text");
 
 	/*
 	 * Set up CPU-specific registers, cache, etc.
Index: arch/next68k/next68k/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/next68k/next68k/machdep.c,v
retrieving revision 1.57
diff -u -r1.57 machdep.c
--- arch/next68k/next68k/machdep.c	2003/04/26 11:05:17	1.57
+++ arch/next68k/next68k/machdep.c	2003/05/05 21:18:00
@@ -283,7 +283,6 @@
 void
 cpu_startup()
 {
-	extern char *kernel_text, *etext;
 	caddr_t v;
 	u_int i, base, residual;
 	vaddr_t minaddr, maxaddr;
@@ -389,26 +388,6 @@
 	printf("avail memory = %s\n", pbuf);
 	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
-
-	/*
-	 * Tell the VM system that the area before the text segment
-	 * is invalid.
-	 *
-	 * XXX Should just change KERNBASE and VM_MIN_KERNEL_ADDRESS,
-	 * XXX but not right now.
-	 */
-	if (uvm_map_protect(kernel_map, 0, round_page((vaddr_t)&kernel_text),
-	    UVM_PROT_NONE, TRUE) != 0)
-		panic("can't mark pre-text pages off-limits");
-
-	/*
-	 * Tell the VM system that writing to the kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 */
-	if (uvm_map_protect(kernel_map, trunc_page((vaddr_t)&kernel_text),
-	    round_page((vaddr_t)&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE)
-	    != 0)
-		panic("can't protect kernel text");
 
 	/*
 	 * Set up CPU-specific registers, cache, etc.
Index: arch/pc532/pc532/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/pc532/pc532/machdep.c,v
retrieving revision 1.138
diff -u -r1.138 machdep.c
--- arch/pc532/pc532/machdep.c	2003/04/26 11:05:18	1.138
+++ arch/pc532/pc532/machdep.c	2003/05/05 21:18:02
@@ -148,7 +148,6 @@
 void
 cpu_startup()
 {
-	extern char kernel_text[];
 	caddr_t v;
 	int sz;
 	u_int i, base, residual;
@@ -247,16 +246,6 @@
 	 */
 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);
-
-	/*
-	 * Tell the VM system that writing to kernel text isn't allowed.
-	 * If we don't, we might end up COW'ing the text segment!
-	 */
-	if (uvm_map_protect(kernel_map,
-			   ns532_round_page(&kernel_text),
-			   ns532_round_page(&etext),
-			   UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
-		panic("can't protect kernel text");
 
 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 	printf("avail memory = %s\n", pbuf);
Index: arch/pc532/pc532/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/pc532/pc532/pmap.c,v
retrieving revision 1.70
diff -u -r1.70 pmap.c
--- arch/pc532/pc532/pmap.c	2002/09/27 15:36:34	1.70
+++ arch/pc532/pc532/pmap.c	2003/05/05 21:18:04
@@ -281,17 +281,6 @@
 static boolean_t pmap_initialized = FALSE; /* pmap_init done yet? */
 
 /*
- * the following two vaddr_t's are used during system startup
- * to keep track of how much of the kernel's VM space we have used.
- * once the system is started, the management of the remaining kernel
- * VM space is turned over to the kernel_map vm_map.
- */
-
-static vaddr_t virtual_avail;	/* VA of first free KVA */
-static vaddr_t virtual_end;	/* VA of last free KVA */
-
-
-/*
  * pv_page management structures: locked by pvalloc_lock
  */
 
@@ -645,8 +634,8 @@
 	msgbuf_paddr = avail_end;
 
 	/*
-	 * set up our local static global vars that keep track of the
-	 * usage of KVM before kernel_map is set up
+	 * define the boundaries of the managed kernel virtual address
+	 * space.
 	 */
 
 	virtual_avail = kva_start;		/* first free KVA */
@@ -1566,20 +1555,6 @@
 	return (0);
 }
 
-
-/*
- * pmap_virtual_space: used during bootup [pmap_steal_memory] to
- *	determine the bounds of the kernel virtual addess space.
- */
-
-void
-pmap_virtual_space(startp, endp)
-	vaddr_t *startp;
-	vaddr_t *endp;
-{
-	*startp = virtual_avail;
-	*endp = virtual_end;
-}
 
 /*
  * pmap_map: map a range of PAs into kvm
Index: arch/powerpc/ibm4xx/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/ibm4xx/pmap.c,v
retrieving revision 1.19
diff -u -r1.19 pmap.c
--- arch/powerpc/ibm4xx/pmap.c	2003/04/02 04:22:03	1.19
+++ arch/powerpc/ibm4xx/pmap.c	2003/05/05 21:18:06
@@ -386,6 +386,12 @@
 	pmap_kernel()->pm_ctx = KERNEL_PID;
 	nextavail = avail->start;
 
+	/*
+	 * Define the boundaries of the managed kernel virtual
+	 * address space.
+	 */
+	virtual_avail = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
+	virtual_end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
 
 	evcnt_attach_static(&tlbhit_ev);
 	evcnt_attach_static(&tlbmiss_ev);
@@ -458,25 +464,6 @@
 
 	/* Setup a pool for additional pvlist structures */
 	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
-}
-
-/*
- * How much virtual space is available to the kernel?
- */
-void
-pmap_virtual_space(vaddr_t *start, vaddr_t *end)
-{
-
-#if 0
-	/*
-	 * Reserve one segment for kernel virtual memory
-	 */
-	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
-	*end = *start + SEGMENT_LENGTH;
-#else
-	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
-	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
-#endif
 }
 
 #ifdef PMAP_GROWKERNEL
Index: arch/powerpc/oea/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/oea/pmap.c,v
retrieving revision 1.8
diff -u -r1.8 pmap.c
--- arch/powerpc/oea/pmap.c	2003/04/07 21:42:14	1.8
+++ arch/powerpc/oea/pmap.c	2003/05/05 21:18:08
@@ -1020,20 +1020,6 @@
 }
 
 /*
- * How much virtual space does the kernel get?
- */
-void
-pmap_virtual_space(vaddr_t *start, vaddr_t *end)
-{
-	/*
-	 * For now, reserve one segment (minus some overhead) for kernel
-	 * virtual memory
-	 */
-	*start = VM_MIN_KERNEL_ADDRESS;
-	*end = VM_MAX_KERNEL_ADDRESS;
-}
-
-/*
  * Allocate, initialize, and return a new physical map.
  */
 pmap_t
@@ -2545,7 +2531,7 @@
  * pmap needs and above 256MB for other stuff.
  */
 vaddr_t
-pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
+pmap_steal_memory(vsize_t vsize)
 {
 	vsize_t size;
 	vaddr_t va;
@@ -2556,9 +2542,6 @@
 	if (uvm.page_init_done == TRUE)
 		panic("pmap_steal_memory: called _after_ bootstrap");
 
-	*vstartp = VM_MIN_KERNEL_ADDRESS;
-	*vendp = VM_MAX_KERNEL_ADDRESS;
-
 	size = round_page(vsize);
 	npgs = atop(size);
 
@@ -2734,6 +2717,14 @@
 	paddr_t s, e;
 	psize_t size;
 	int i, j;
+
+	/*
+	 * Define the boundaries of the managed kernel virtual address
+	 * space.  For now, reserve one segment (minus some overhead)
+	 * for kernel virtual memory.
+	 */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;
+	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
 	/*
 	 * Get memory.
Index: arch/sh3/sh3/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/sh3/pmap.c,v
retrieving revision 1.43
diff -u -r1.43 pmap.c
--- arch/sh3/sh3/pmap.c	2003/04/02 02:56:41	1.43
+++ arch/sh3/sh3/pmap.c	2003/05/05 21:18:10
@@ -105,6 +105,13 @@
 	size_t sz;
 	caddr_t v;
 
+	/*
+	 * Define the boundaries of the kernel virtual address
+	 * space.
+	 */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;
+	virtual_end = VM_MAX_KERNEL_ADDRESS;
+
 	/* Steal msgbuf area */
 	initmsgbuf((caddr_t)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);
 
@@ -131,7 +138,7 @@
 }
 
 vaddr_t
-pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
+pmap_steal_memory(vsize_t size)
 {
 	struct vm_physseg *bank;
 	int i, j, npage;
@@ -205,14 +212,6 @@
 	return (__pmap_kve);
  error:
 	panic("pmap_growkernel: out of memory.");
-}
-
-void
-pmap_virtual_space(vaddr_t *start, vaddr_t *end)
-{
-
-	*start = VM_MIN_KERNEL_ADDRESS;
-	*end = VM_MAX_KERNEL_ADDRESS;
 }
 
 void
Index: arch/sh5/include/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sh5/include/pmap.h,v
retrieving revision 1.17
diff -u -r1.17 pmap.h
--- arch/sh5/include/pmap.h	2003/04/02 07:36:03	1.17
+++ arch/sh5/include/pmap.h	2003/05/05 21:18:10
@@ -87,7 +87,6 @@
 #define	PMAP_UNMAP_POOLPAGE(v)	pmap_unmap_poolpage((v))
 
 #define	PMAP_STEAL_MEMORY
-extern vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
 
 #define pmap_clear_modify(pg)		(pmap_clear_bit((pg), SH5_PTEL_M))
 #define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), SH5_PTEL_R))
Index: arch/sh5/sh5/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sh5/sh5/pmap.c,v
retrieving revision 1.31
diff -u -r1.31 pmap.c
--- arch/sh5/sh5/pmap.c	2003/04/18 20:02:34	1.31
+++ arch/sh5/sh5/pmap.c	2003/05/05 21:18:13
@@ -218,7 +218,8 @@
  * 
  * E0000000 - FFFFFFFF  (KSEG1)
  * 	KSEG1 is basically the `managed' kernel virtual address space
- *	as reported to uvm(9) by pmap_virtual_space().
+ *	as reported to uvm(9) by the setting of virtual_avail and
+ *	virtual_end in pmap_bootstrap().
  * 
  * 	It uses regular TLB mappings, but backed by a dedicated IPT
  * 	with 1-1 V2Phys PTELs. The IPT will be up to 512KB (for NEFF=32)
@@ -1145,6 +1146,12 @@
 	pmap_kva_avail_start = pmap_device_kva_start +
 	    PMAP_BOOTSTRAP_DEVICE_KVA;
 
+	/*
+	 * Define the boundaries of the kernel virtual address space.
+	 */
+	virtual_avail = pmap_kva_avail_start;
+	virtual_end = SH5_KSEG1_BASE + ((KERNEL_IPT_SIZE - 1) * PAGE_SIZE);
+
 	pmap_asid_next = PMAP_ASID_USER_START;
 	pmap_asid_max = SH5_PTEH_ASID_MASK;	/* XXX Should be cpu specific */
 
@@ -1260,17 +1267,6 @@
 }
 
 /*
- * How much virtual space does the kernel get?
- */
-void
-pmap_virtual_space(vaddr_t *start, vaddr_t *end)
-{
-
-	*start = pmap_kva_avail_start;
-	*end = SH5_KSEG1_BASE + ((KERNEL_IPT_SIZE - 1) * PAGE_SIZE);
-}
-
-/*
  * Allocate, initialize, and return a new physical map.
  */
 pmap_t
@@ -2956,7 +2952,7 @@
 }
 
 vaddr_t
-pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
+pmap_steal_memory(vsize_t vsize)
 {
 	vsize_t size;
 	vaddr_t va;
Index: arch/sparc/include/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/include/pmap.h,v
retrieving revision 1.66
diff -u -r1.66 pmap.h
--- arch/sparc/include/pmap.h	2003/03/02 21:37:20	1.66
+++ arch/sparc/include/pmap.h	2003/05/05 21:18:14
@@ -264,7 +264,6 @@
 void		pmap_reference __P((pmap_t));
 void		pmap_remove __P((pmap_t, vaddr_t, vaddr_t));
 #define		pmap_update(pmap)		/* nothing (yet) */
-void		pmap_virtual_space __P((vaddr_t *, vaddr_t *));
 void		pmap_redzone __P((void));
 void		kvm_uncache __P((caddr_t, int));
 struct user;
Index: arch/sparc/sparc/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/pmap.c,v
retrieving revision 1.251
diff -u -r1.251 pmap.c
--- arch/sparc/sparc/pmap.c	2003/05/01 14:14:46	1.251
+++ arch/sparc/sparc/pmap.c	2003/05/05 21:18:20
@@ -405,8 +405,6 @@
 				   than the `etext gap' defined below */
 static vaddr_t	etext_gap_start;/* start of gap between text & data */
 static vaddr_t	etext_gap_end;	/* end of gap between text & data */
-static vaddr_t	virtual_avail;	/* first free kernel virtual address */
-static vaddr_t	virtual_end;	/* last free kernel virtual address */
 
 static void pmap_page_upload(void);
 
@@ -1011,19 +1009,6 @@
  */
 
 /*
- * How much virtual space does this kernel have?
- * (After mapping kernel text, data, etc.)
- */
-void
-pmap_virtual_space(v_start, v_end)
-        vaddr_t *v_start;
-        vaddr_t *v_end;
-{
-        *v_start = virtual_avail;
-        *v_end   = virtual_end;
-}
-
-/*
  * Helper routine that hands off available physical pages to the VM system.
  */
 static void
@@ -3135,6 +3120,9 @@
 	vmmap = p, p += NBPG;
 	p = reserve_dumppages(p);
 
+	/*
+	 * Define the bounds of the managed kernel virtual address space.
+	 */
 	virtual_avail = (vaddr_t)p;
 	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
@@ -3590,6 +3578,9 @@
 			&sp->sg_pte[VA_SUN4M_VPG(cpuinfo.vpage[i])];
 	}
 
+	/*
+	 * Define the bounds of the managed kernel virtual address space.
+	 */
 	virtual_avail = (vaddr_t)p;
 	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
Index: arch/sparc64/sparc64/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc64/sparc64/pmap.c,v
retrieving revision 1.138
diff -u -r1.138 pmap.c
--- arch/sparc64/sparc64/pmap.c	2003/04/01 16:34:59	1.138
+++ arch/sparc64/sparc64/pmap.c	2003/05/05 21:18:24
@@ -464,6 +464,8 @@
  * the kernel, then traverse the free memory lists to find out how big it is.
  */
 
+static vaddr_t kbreak; /* End of kernel VA */
+
 void
 pmap_bootstrap(kernelstart, kernelend, maxctx)
 	u_long kernelstart, kernelend;
@@ -1334,8 +1336,16 @@
 	avail_start = nextavail;
 	for (mp = avail; mp->size; mp++)
 		avail_end = mp->start+mp->size;
-	BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\r\n"));
 
+	/*
+	 * Reserve two pages for pmap_copy_page && /dev/mem and
+	 * define the boundaries of the kernel virtual address
+	 * space.
+	 */
+	virtual_avail = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE);
+	virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+	BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\r\n"));
 }
 
 /*
@@ -1390,24 +1400,6 @@
 
 	vm_first_phys = avail_start;
 	vm_num_phys = avail_end - avail_start;
-}
-
-/*
- * How much virtual space is available to the kernel?
- */
-static vaddr_t kbreak; /* End of kernel VA */
-void
-pmap_virtual_space(start, end)
-	vaddr_t *start, *end;
-{
-
-	/*
-	 * Reserve one segment for kernel virtual memory
-	 */
-	/* Reserve two pages for pmap_copy_page && /dev/mem */
-	*start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE);
-	*end = VM_MAX_KERNEL_ADDRESS;
-	BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\r\n", *start, *end));
 }
 
 /*
Index: arch/sun2/sun2/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sun2/sun2/pmap.c,v
retrieving revision 1.20
diff -u -r1.20 pmap.c
--- arch/sun2/sun2/pmap.c	2003/04/01 15:47:49	1.20
+++ arch/sun2/sun2/pmap.c	2003/05/05 21:18:27
@@ -197,7 +197,6 @@
  * These are set in pmap_bootstrap() and used in
  * pmap_next_page().
  */
-vaddr_t virtual_avail, virtual_end;
 paddr_t avail_start, avail_end;
 #define	managed(pa)	(((pa) >= avail_start) && ((pa) < avail_end))
 
@@ -1783,19 +1782,6 @@
 /*
  * Support functions for vm_page_bootstrap().
  */
-
-/*
- * How much virtual space does this kernel have?
- * (After mapping kernel text, data, etc.)
- */
-void
-pmap_virtual_space(v_start, v_end)
-	vaddr_t *v_start;
-	vaddr_t *v_end;
-{
-	*v_start = virtual_avail;
-	*v_end   = virtual_end;
-}
 
 /* Provide memory to the VM system. */
 static void
Index: arch/sun3/sun3/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sun3/sun3/pmap.c,v
retrieving revision 1.140
diff -u -r1.140 pmap.c
--- arch/sun3/sun3/pmap.c	2003/04/01 15:31:13	1.140
+++ arch/sun3/sun3/pmap.c	2003/05/05 21:18:29
@@ -206,7 +206,6 @@
  * These are set in pmap_bootstrap() and used in
  * pmap_next_page().
  */
-vaddr_t virtual_avail, virtual_end;
 paddr_t avail_start, avail_end;
 #define	managed(pa)	(((pa) >= avail_start) && ((pa) < avail_end))
 
@@ -1812,19 +1811,6 @@
 /*
  * Support functions for vm_page_bootstrap().
  */
-
-/*
- * How much virtual space does this kernel have?
- * (After mapping kernel text, data, etc.)
- */
-void
-pmap_virtual_space(v_start, v_end)
-	vaddr_t *v_start;
-	vaddr_t *v_end;
-{
-	*v_start = virtual_avail;
-	*v_end   = virtual_end;
-}
 
 /* Provide memory to the VM system. */
 static void
Index: arch/sun3/sun3x/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sun3/sun3x/pmap.c,v
retrieving revision 1.79
diff -u -r1.79 pmap.c
--- arch/sun3/sun3x/pmap.c	2003/04/01 15:28:41	1.79
+++ arch/sun3/sun3x/pmap.c	2003/05/05 21:18:33
@@ -272,8 +272,6 @@
  * XXX:  For now, retain the traditional variables that were
  * used in the old pmap/vm interface (without NONCONTIG).
  */
-/* Kernel virtual address space available: */
-vaddr_t	virtual_avail, virtual_end;
 /* Physical address space available: */
 paddr_t	avail_start, avail_end;
 
@@ -3512,19 +3510,6 @@
 	}
 }
 
-
-/* pmap_virtual_space			INTERFACE
- **
- * Return the current available range of virtual addresses in the
- * arguuments provided.  Only really called once.
- */
-void
-pmap_virtual_space(vstart, vend)
-	vaddr_t *vstart, *vend;
-{
-	*vstart = virtual_avail;
-	*vend = virtual_end;
-}
 
 /*
  * Provide memory to the VM system.
Index: arch/vax/vax/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/vax/pmap.c,v
retrieving revision 1.125
diff -u -r1.125 pmap.c
--- arch/vax/vax/pmap.c	2003/04/01 15:22:53	1.125
+++ arch/vax/vax/pmap.c	2003/05/05 21:18:35
@@ -179,7 +179,6 @@
 #endif
 
 paddr_t	  avail_start, avail_end;
-vaddr_t	  virtual_avail, virtual_end; /* Available virtual memory	*/
 
 struct pv_entry *get_pventry(void);
 void free_pventry(struct pv_entry *);
@@ -289,6 +288,9 @@
 	 * amount of physical memory also, therefore sysptsize is 
 	 * a variable here that is changed dependent of the physical
 	 * memory size.
+	 *
+	 * Note that setting virtual_avail/virtual_end defines the
+	 * boundaries of the managed kernel virtual address space.
 	 */
 	virtual_avail = avail_end + KERNBASE;
 	virtual_end = KERNBASE + sysptsize * VAX_NBPG;
@@ -417,30 +419,17 @@
 }
 
 /*
- * Define the initial bounds of the kernel virtual address space.
- */
-void
-pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
-{
-
-	*vstartp = virtual_avail;
-	*vendp = virtual_end;
-}
-
-/*
  * Let the VM system do early memory allocation from the direct-mapped
  * physical memory instead.
  */
 vaddr_t
-pmap_steal_memory(size, vstartp, vendp)
+pmap_steal_memory(size)
 	vsize_t size;
-	vaddr_t *vstartp, *vendp;
 {
 	vaddr_t v;
 	int npgs;
 
-	PMDEBUG(("pmap_steal_memory: size 0x%lx start %p end %p\n",
-		    size, vstartp, vendp));
+	PMDEBUG(("pmap_steal_memory: size 0x%lx\n", size));
 
 	size = round_page(size);
 	npgs = btoc(size);
Index: uvm/uvm_extern.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.78
diff -u -r1.78 uvm_extern.h
--- uvm/uvm_extern.h	2003/05/03 19:01:06	1.78
+++ uvm/uvm_extern.h	2003/05/05 21:18:37
@@ -504,6 +504,16 @@
 extern struct vm_map *phys_map;
 
 /*
+ * these variables define the boundaries of the managed kernel virtual
+ * address space.  they are initialized by machine-dependent code during
+ * bootstrap.  note that before kernel virtual memory is initialized,
+ * some address space might be "stolen" during bootstrap.  anything that
+ * steals address space must update these variables accordingly.
+ */
+extern vaddr_t virtual_avail;
+extern vaddr_t virtual_end;
+
+/*
  * macros
  */
 
Index: uvm/uvm_init.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_init.c,v
retrieving revision 1.16
diff -u -r1.16 uvm_init.c
--- uvm/uvm_init.c	2003/03/04 06:18:54	1.16
+++ uvm/uvm_init.c	2003/05/05 21:18:37
@@ -72,7 +72,6 @@
 void
 uvm_init()
 {
-	vaddr_t kvm_start, kvm_end;
 
 	/*
 	 * step 0: ensure that the hardware set the page size
@@ -93,11 +92,9 @@
 	 * step 2: init the page sub-system.  this includes allocating the
 	 * vm_page structures, and setting up all the page queues (and
 	 * locks).  available memory will be put in the "free" queue.
-	 * kvm_start and kvm_end will be set to the area of kernel virtual
-	 * memory which is available for general use.
 	 */
 
-	uvm_page_init(&kvm_start, &kvm_end);
+	uvm_page_init();
 
 	/*
 	 * step 3: init the map sub-system.  allocates the static pool of
@@ -113,7 +110,7 @@
 	 * kmem_object.
 	 */
 
-	uvm_km_init(kvm_start, kvm_end);
+	uvm_km_init();
 
 	/*
 	 * step 5: init the pmap module.   the pmap module is free to allocate
@@ -155,8 +152,7 @@
 	 */
 
 	uvm_page_rehash();
-	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
-	    UAO_FLAG_KERNSWAP);
+	uao_create(virtual_end - virtual_avail, UAO_FLAG_KERNSWAP);
 
 	/*
 	 * done!
Index: uvm/uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.60
diff -u -r1.60 uvm_km.c
--- uvm/uvm_km.c	2002/11/30 18:28:05	1.60
+++ uvm/uvm_km.c	2003/05/05 21:18:37
@@ -74,8 +74,8 @@
  * overview of kernel memory management:
  *
  * the kernel virtual address space is mapped by "kernel_map."   kernel_map
- * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
- * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
+ * starts at virtual_avail and goes to virtual_end.  note that virtual_avail
+ * is equal to vm_map_min(kernel_map).
  *
  * the kernel_map has several "submaps."   submaps can only appear in
  * the kernel_map (user processes can't use them).   submaps "take over"
@@ -102,8 +102,8 @@
  * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
  * are "special" and never die).   all kernel objects should be thought of
  * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
- * object is equal to the size of kernel virtual address space (i.e. the
- * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
+ * object is equal to the size of managed kernel virtual address space (i.e.
+ * the value "virtual_end - virtual_avail").
  *
  * most kernel private memory lives in kernel_object.   the only exception
  * to this is for memory that belongs to submaps that must be protected
@@ -118,9 +118,9 @@
  * offsets that are managed by the submap.
  *
  * note that the "offset" in a kernel object is always the kernel virtual
- * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
+ * address minus virtual_avail (aka vm_map_min(kernel_map)).
  * example:
- *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
+ *   suppose virtual_avail is 0xf8000000 and the kernel does a
  *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
  *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
  *   then that means that the page at offset 0x235000 in kernel_object is
@@ -148,6 +148,9 @@
  * global data structures
  */
 
+vaddr_t virtual_avail;		/* start of managed kernel virtual memory */
+vaddr_t virtual_end;		/* end of managed kernel virtual memory */
+
 struct vm_map *kernel_map = NULL;
 
 /*
@@ -160,16 +163,22 @@
  * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
  * KVM already allocated for text, data, bss, and static data structures).
  *
- * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
- *    we assume that [min -> start] has already been allocated and that
- *    "end" is the end.
+ * => KVM is defined by virtual_avail/virtual_end.
+ *    we assume that any regions that have already been allocated from
+ *    the total kernel address space have already been accounted for in
+ *    the values of virtual_avail and virtual_end.
  */
 
 void
-uvm_km_init(start, end)
-	vaddr_t start, end;
+uvm_km_init(void)
 {
-	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
+
+	/*
+	 * virtual_avail and virtual_end should already be page-aligned.
+	 */
+
+	KASSERT((virtual_avail & PAGE_MASK) == 0);
+	KASSERT((virtual_end & PAGE_MASK) == 0);
 
 	/*
 	 * next, init kernel memory objects.
 	 */
@@ -177,22 +186,17 @@
 
 	/* kernel_object: for pageable anonymous kernel memory */
 	uao_init();
-	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
-				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
+	uvm.kernel_object = uao_create(virtual_end - virtual_avail,
+				       UAO_FLAG_KERNOBJ);
 
 	/*
 	 * init the map and reserve any space that might already
 	 * have been allocated kernel space before installing.
 	 */
 
-	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
+	uvm_map_setup(&kernel_map_store, virtual_avail, virtual_end,
+		      VM_MAP_PAGEABLE);
 	kernel_map_store.pmap = pmap_kernel();
-	if (start != base &&
-	    uvm_map(&kernel_map_store, &base, start - base, NULL,
-		    UVM_UNKNOWN_OFFSET, 0,
-		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
-		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
-		panic("uvm_km_init: could not reserve space for kernel");
 
 	/*
 	 * install!
Index: uvm/uvm_km.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.h,v
retrieving revision 1.10
diff -u -r1.10 uvm_km.h
--- uvm/uvm_km.h	2001/09/15 20:36:46	1.10
+++ uvm/uvm_km.h	2003/05/05 21:18:37
@@ -47,7 +47,7 @@
  * prototypes
  */
 
-void uvm_km_init __P((vaddr_t, vaddr_t));
+void uvm_km_init __P((void));
 void uvm_km_pgremove __P((struct uvm_object *, vaddr_t, vaddr_t));
 void uvm_km_pgremove_intrsafe __P((vaddr_t, vaddr_t));
 
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.86
diff -u -r1.86 uvm_page.c
--- uvm/uvm_page.c	2003/04/22 14:28:15	1.86
+++ uvm/uvm_page.c	2003/05/05 21:18:39
@@ -113,15 +113,6 @@
  */
 
 /*
- * these variables record the values returned by vm_page_bootstrap,
- * for debugging purposes.  The implementation of uvm_pageboot_alloc
- * and pmap_startup here also uses them internally.
- */
-
-static vaddr_t      virtual_space_start;
-static vaddr_t      virtual_space_end;
-
-/*
  * we use a hash table with only one bucket during bootup.  we will
  * later rehash (resize) the hash table once the allocator is ready.
  * we static allocate the one bootstrap bucket below...
@@ -242,8 +233,7 @@
  */
 
 void
-uvm_page_init(kvm_startp, kvm_endp)
-	vaddr_t *kvm_startp, *kvm_endp;
+uvm_page_init(void)
 {
 	vsize_t freepages, pagecount, bucketcount, n;
 	struct pgflbucket *bucketarray;
@@ -365,15 +355,6 @@
 	}
 
 	/*
-	 * pass up the values of virtual_space_start and
-	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
-	 * layers of the VM.
-	 */
-
-	*kvm_startp = round_page(virtual_space_start);
-	*kvm_endp = trunc_page(virtual_space_end);
-
-	/*
 	 * init locks for kernel threads
 	 */
 
@@ -458,11 +439,17 @@
 	 * on first call to this function, initialize ourselves.
 	 */
 	if (initialized == FALSE) {
-		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+		/*
+		 * make sure machine-dependent code has initialized our
+		 * virtual address space boundaries properly.
+		 */
+		if (virtual_end <= virtual_avail)
+			panic("uvm_pageboot_alloc: MD code did not init "
+			    "KVA boundaries");
 
 		/* round it the way we like it */
-		virtual_space_start = round_page(virtual_space_start);
-		virtual_space_end = trunc_page(virtual_space_end);
+		virtual_avail = round_page(virtual_avail);
+		virtual_end = trunc_page(virtual_end);
 
 		initialized = TRUE;
 	}
@@ -475,11 +462,10 @@
 	/*
 	 * defer bootstrap allocation to MD code (it may want to allocate
 	 * from a direct-mapped segment).  pmap_steal_memory should adjust
-	 * virtual_space_start/virtual_space_end if necessary.
+	 * virtual_avail/virtual_end if necessary.
 	 */
 
-	addr = pmap_steal_memory(size, &virtual_space_start,
-	    &virtual_space_end);
+	addr = pmap_steal_memory(size);
 
 	return(addr);
 
@@ -488,11 +474,11 @@
 	/*
 	 * allocate virtual memory for this request
 	 */
-	if (virtual_space_start == virtual_space_end ||
-	    (virtual_space_end - virtual_space_start) < size)
+	if (virtual_avail == virtual_end ||
+	    (virtual_end - virtual_avail) < size)
 		panic("uvm_pageboot_alloc: out of virtual space");
 
-	addr = virtual_space_start;
+	addr = virtual_avail;
 
 #ifdef PMAP_GROWKERNEL
 	/*
@@ -506,7 +492,7 @@
 	}
 #endif
 
-	virtual_space_start += size;
+	virtual_avail += size;
 
 	/*
 	 * allocate and mapin physical pages to back new virtual pages
Index: uvm/uvm_page.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.h,v
retrieving revision 1.32
diff -u -r1.32 uvm_page.h
--- uvm/uvm_page.h	2002/11/08 02:05:16	1.32
+++ uvm/uvm_page.h	2003/05/05 21:18:39
@@ -263,7 +263,7 @@
  * prototypes: the following prototypes define the interface to pages
  */
 
-void uvm_page_init __P((vaddr_t *, vaddr_t *));
+void uvm_page_init __P((void));
 #if defined(UVM_PAGE_TRKOWN)
 void uvm_page_own __P((struct vm_page *, char *));
 #endif
Index: uvm/uvm_pmap.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pmap.h,v
retrieving revision 1.12
diff -u -r1.12 uvm_pmap.h
--- uvm/uvm_pmap.h	2003/01/18 09:43:01	1.12
+++ uvm/uvm_pmap.h	2003/05/05 21:18:40
@@ -163,9 +163,8 @@
 void		 pmap_zero_page __P((paddr_t));
 #endif
 
-void		 pmap_virtual_space __P((vaddr_t *, vaddr_t *));
 #if defined(PMAP_STEAL_MEMORY)
-vaddr_t		 pmap_steal_memory __P((vsize_t, vaddr_t *, vaddr_t *));
+vaddr_t		 pmap_steal_memory __P((vsize_t));
 #endif
 
 #if defined(PMAP_FORK)

--Apple-Mail-3-849482151--