Re: uvm physseg - put the lid on it.
>>>>> "Cherry" == Cherry G Mathew <cherry%zyx.in@localhost> writes:
>>>>> "Cherry" == Cherry G Mathew <cherry%zyx.in@localhost> writes:
Cherry> Hi Everyone, Here's a patch to put the vm physseg stuff
Cherry> behind bars.
Cherry> Hi,
Cherry> The previous patch had spurious whitespace and other noise
Cherry> in it, which I'm very sorry for.
Cherry> Here's a (hopefully) cleaner one. I've compile tested it on
Cherry> amd64.
And this one missed the new files (diff -uN).
Hopefully the one below has it all:
--
~~cherry
Index: sys/arch/amd64/amd64/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/amd64/amd64/machdep.c,v
retrieving revision 1.230
diff -u -r1.230 machdep.c
--- sys/arch/amd64/amd64/machdep.c 27 Aug 2016 16:17:16 -0000 1.230
+++ sys/arch/amd64/amd64/machdep.c 28 Sep 2016 12:22:17 -0000
@@ -799,10 +799,23 @@
setbit(sparse_dump_physmap, p);
}
}
- for (i = 0; i < vm_nphysseg; i++) {
- struct vm_physseg *seg = VM_PHYSMEM_PTR(i);
+ for (i = 0; i < VM_PHYSSEG_MAX; i++) {
+ paddr_t pfn;
- for (pg = seg->pgs; pg < seg->lastpg; pg++) {
+ if (uvm_physmem_valid(i) == false)
+ break;
+
+ const paddr_t startpfn = uvm_physmem_get_start(i);
+ const paddr_t endpfn = uvm_physmem_get_end(i);
+
+ KASSERT(startpfn != -1 && endpfn != -1);
+
+ /*
+ * We assume that seg->start to seg->end are
+ * uvm_page_physload()ed
+ */
+ for (pfn = startpfn; pfn <= endpfn; pfn++) {
+ pg = PHYS_TO_VM_PAGE(ptoa(pfn));
if (pg->uanon || (pg->pqflags & PQ_FREE) ||
(pg->uobject && pg->uobject->pgops)) {
p = VM_PAGE_TO_PHYS(pg) / PAGE_SIZE;
@@ -1445,56 +1458,29 @@
init_x86_64_msgbuf(void)
{
/* Message buffer is located at end of core. */
- struct vm_physseg *vps;
- psize_t sz = round_page(MSGBUFSIZE);
- psize_t reqsz = sz;
- int x;
+ psize_t reqsz = round_page(MSGBUFSIZE);
+ psize_t sz = 0;
- search_again:
- vps = NULL;
+ for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+ paddr_t stolenpa;
- for (x = 0; x < vm_nphysseg; x++) {
- vps = VM_PHYSMEM_PTR(x);
- if (ctob(vps->avail_end) == avail_end)
+ if (!uvm_page_physget(&stolenpa))
break;
- }
- if (x == vm_nphysseg)
- panic("init_x86_64: can't find end of memory");
-
- /* Shrink so it'll fit in the last segment. */
- if ((vps->avail_end - vps->avail_start) < atop(sz))
- sz = ctob(vps->avail_end - vps->avail_start);
-
- vps->avail_end -= atop(sz);
- vps->end -= atop(sz);
- msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
- msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
- /* Remove the last segment if it now has no pages. */
- if (vps->start == vps->end) {
- for (vm_nphysseg--; x < vm_nphysseg; x++)
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
-
- /* Now find where the new avail_end is. */
- for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
- if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
- avail_end = VM_PHYSMEM_PTR(x)->avail_end;
- avail_end = ctob(avail_end);
-
- if (sz == reqsz)
- return;
- reqsz -= sz;
- if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
- /* No more segments available, bail out. */
- printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
- (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
- return;
+ if (stolenpa == (msgbuf_p_seg[msgbuf_p_cnt].paddr
+ + PAGE_SIZE)) {
+ /* contiguous: append it to current buf alloc */
+ msgbuf_p_seg[msgbuf_p_cnt].sz += PAGE_SIZE;
+ } else {
+ /* non-contiguous: start a new msgbuf seg */
+ msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+ msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+ }
}
- sz = reqsz;
- goto search_again;
+ if (sz != reqsz)
+ printf("%s: could only allocate %ld bytes of requested %ld bytes\n",
+ __func__, sz, reqsz);
}
static void
Index: sys/arch/i386/i386/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/i386/i386/machdep.c,v
retrieving revision 1.761
diff -u -r1.761 machdep.c
--- sys/arch/i386/i386/machdep.c 27 Aug 2016 16:07:26 -0000 1.761
+++ sys/arch/i386/i386/machdep.c 28 Sep 2016 12:22:26 -0000
@@ -1032,56 +1032,29 @@
init386_msgbuf(void)
{
/* Message buffer is located at end of core. */
- struct vm_physseg *vps;
- psize_t sz = round_page(MSGBUFSIZE);
- psize_t reqsz = sz;
- unsigned int x;
-
- search_again:
- vps = NULL;
- for (x = 0; x < vm_nphysseg; ++x) {
- vps = VM_PHYSMEM_PTR(x);
- if (ctob(vps->avail_end) == avail_end) {
- break;
- }
- }
- if (x == vm_nphysseg)
- panic("init386: can't find end of memory");
+ psize_t reqsz = round_page(MSGBUFSIZE);
+ psize_t sz = 0;
- /* Shrink so it'll fit in the last segment. */
- if (vps->avail_end - vps->avail_start < atop(sz))
- sz = ctob(vps->avail_end - vps->avail_start);
-
- vps->avail_end -= atop(sz);
- vps->end -= atop(sz);
- msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
- msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
- /* Remove the last segment if it now has no pages. */
- if (vps->start == vps->end) {
- for (--vm_nphysseg; x < vm_nphysseg; x++)
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
-
- /* Now find where the new avail_end is. */
- for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
- if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
- avail_end = VM_PHYSMEM_PTR(x)->avail_end;
- avail_end = ctob(avail_end);
+ for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+ paddr_t stolenpa;
- if (sz == reqsz)
- return;
+ if (!uvm_page_physget(&stolenpa))
+ break;
- reqsz -= sz;
- if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
- /* No more segments available, bail out. */
- printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
- (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
- return;
+ if (stolenpa == (msgbuf_p_seg[msgbuf_p_cnt].paddr
+ + PAGE_SIZE)) {
+ /* contiguous: append it to current buf alloc */
+ msgbuf_p_seg[msgbuf_p_cnt].sz += PAGE_SIZE;
+ } else {
+ /* non-contiguous: start a new msgbuf seg */
+ msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+ msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+ }
}
- sz = reqsz;
- goto search_again;
+ if (sz != reqsz)
+ printf("%s: could only allocate %ld bytes of requested %ld bytes\n",
+ __func__, sz, reqsz);
}
#ifndef XEN
Index: sys/arch/x86/x86/x86_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/x86_machdep.c,v
retrieving revision 1.75
diff -u -r1.75 x86_machdep.c
--- sys/arch/x86/x86/x86_machdep.c 1 Aug 2016 16:07:39 -0000 1.75
+++ sys/arch/x86/x86/x86_machdep.c 28 Sep 2016 12:22:32 -0000
@@ -67,6 +67,7 @@
#include <machine/vmparam.h>
#include <uvm/uvm_extern.h>
+#include <uvm/uvm_physmem.h>
#include "acpica.h"
#if NACPICA > 0
Index: sys/sys/systm.h
===================================================================
RCS file: /cvsroot/src/sys/sys/systm.h,v
retrieving revision 1.271
diff -u -r1.271 systm.h
--- sys/sys/systm.h 6 Jul 2016 05:20:48 -0000 1.271
+++ sys/sys/systm.h 28 Sep 2016 12:22:44 -0000
@@ -82,7 +82,7 @@
extern int selwait; /* select timeout address */
extern int maxmem; /* max memory per process */
-extern int physmem; /* physical memory */
+extern psize_t physmem; /* physical memory */
extern dev_t dumpdev; /* dump device */
extern dev_t dumpcdev; /* dump device (character equivalent) */
Index: sys/uvm/Makefile
===================================================================
RCS file: /cvsroot/src/sys/uvm/Makefile,v
retrieving revision 1.9
diff -u -r1.9 Makefile
--- sys/uvm/Makefile 11 Feb 2006 12:45:07 -0000 1.9
+++ sys/uvm/Makefile 28 Sep 2016 12:22:44 -0000
@@ -5,7 +5,7 @@
INCS= uvm.h uvm_amap.h uvm_anon.h uvm_aobj.h uvm_device.h \
uvm_extern.h uvm_fault.h uvm_fault_i.h uvm_glue.h \
uvm_km.h uvm_loan.h \
- uvm_map.h uvm_object.h uvm_page.h \
+ uvm_map.h uvm_object.h uvm_page.h uvm_physmem.h \
uvm_pager.h uvm_param.h uvm_pdaemon.h uvm_pglist.h \
uvm_pmap.h uvm_prot.h uvm_stat.h \
uvm_swap.h
Index: sys/uvm/files.uvm
===================================================================
RCS file: /cvsroot/src/sys/uvm/files.uvm,v
retrieving revision 1.26
diff -u -r1.26 files.uvm
--- sys/uvm/files.uvm 12 Aug 2016 13:40:21 -0000 1.26
+++ sys/uvm/files.uvm 28 Sep 2016 12:22:44 -0000
@@ -41,6 +41,7 @@
file uvm/uvm_pdpolicy_clock.c !pdpolicy_clockpro
file uvm/uvm_pdpolicy_clockpro.c pdpolicy_clockpro
file uvm/uvm_pglist.c uvm
+file uvm/uvm_physmem.c uvm
file uvm/uvm_readahead.c uvm
file uvm/uvm_stat.c uvm
file uvm/uvm_swap.c vmswap
Index: sys/uvm/uvm.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm.h,v
retrieving revision 1.66
diff -u -r1.66 uvm.h
--- sys/uvm/uvm.h 13 Apr 2015 22:04:44 -0000 1.66
+++ sys/uvm/uvm.h 28 Sep 2016 12:22:44 -0000
@@ -57,6 +57,7 @@
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pager.h>
+#include <uvm/uvm_physmem.h>
#include <uvm/uvm_pdaemon.h>
#include <uvm/uvm_swap.h>
Index: sys/uvm/uvm_extern.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.198
diff -u -r1.198 uvm_extern.h
--- sys/uvm/uvm_extern.h 20 Jul 2016 12:38:43 -0000 1.198
+++ sys/uvm/uvm_extern.h 28 Sep 2016 12:22:46 -0000
@@ -708,9 +708,6 @@
struct vm_page *);
void uvm_pagerealloc(struct vm_page *,
struct uvm_object *, voff_t);
-/* Actually, uvm_page_physload takes PF#s which need their own type */
-void uvm_page_physload(paddr_t, paddr_t, paddr_t,
- paddr_t, int);
void uvm_setpagesize(void);
/* uvm_pager.c */
Index: sys/uvm/uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.187
diff -u -r1.187 uvm_page.c
--- sys/uvm/uvm_page.c 11 Apr 2015 19:24:13 -0000 1.187
+++ sys/uvm/uvm_page.c 28 Sep 2016 12:22:51 -0000
@@ -87,6 +87,24 @@
#include <uvm/uvm_pdpolicy.h>
/*
+ * vm_physseg: describes one segment of physical memory
+ */
+struct vm_physseg {
+ paddr_t start; /* PF# of first page in segment */
+ paddr_t end; /* (PF# of last page in segment) + 1 */
+ paddr_t avail_start; /* PF# of first free page in segment */
+ paddr_t avail_end; /* (PF# of last free page in segment) +1 */
+ struct vm_page *pgs; /* vm_page structures (from start) */
+ struct vm_page *lastpg; /* vm_page structure for end */
+ int free_list; /* which free list they belong on */
+ u_int start_hint; /* start looking for free pages here */
+ /* protected by uvm_fpageqlock */
+#ifdef __HAVE_PMAP_PHYSSEG
+ struct pmap_physseg pmseg; /* pmap specific (MD) data */
+#endif
+};
+
+/*
* global vars... XXXCDC: move to uvm. structure.
*/
@@ -94,9 +112,6 @@
* physical memory config is stored in vm_physmem.
*/
-struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
-int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
-#define vm_nphysmem vm_nphysseg
/*
* Some supported CPUs in a given architecture don't support all
@@ -116,7 +131,7 @@
/*
* physical memory size;
*/
-int physmem;
+psize_t physmem;
/*
* local variables
@@ -337,11 +352,8 @@
static struct uvm_cpu boot_cpu;
psize_t freepages, pagecount, bucketcount, n;
struct pgflbucket *bucketarray, *cpuarray;
- struct vm_physseg *seg;
struct vm_page *pagearray;
int lcv;
- u_int i;
- paddr_t paddr;
KASSERT(ncpu <= 1);
CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
@@ -369,7 +381,7 @@
* now is to allocate vm_page structures for this memory.
*/
- if (vm_nphysmem == 0)
+ if (uvm_physmem_get_last() == -1)
panic("uvm_page_bootstrap: no memory pre-allocated");
/*
@@ -381,9 +393,11 @@
*/
freepages = 0;
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
- seg = VM_PHYSMEM_PTR(lcv);
- freepages += (seg->end - seg->start);
+
+ for (lcv = uvm_physmem_get_first();
+ lcv <= uvm_physmem_get_last() ;
+ lcv = uvm_physmem_get_next(lcv)) {
+ freepages += (uvm_physmem_get_end(lcv) - uvm_physmem_get_start(lcv));
}
/*
@@ -428,31 +442,19 @@
/*
* init the vm_page structures and put them in the correct place.
*/
+ for (lcv = uvm_physmem_get_first();
+ lcv <= uvm_physmem_get_last();
+ lcv = uvm_physmem_get_next(lcv)) {
+
+ n = uvm_physmem_get_end(lcv) - uvm_physmem_get_start(lcv);
+ n = ((n + 1) << PAGE_SHIFT) /
+ (PAGE_SIZE + sizeof(struct vm_page));
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
- seg = VM_PHYSMEM_PTR(lcv);
- n = seg->end - seg->start;
+ uvm_physmem_init_seg(lcv, pagearray);
/* set up page array pointers */
- seg->pgs = pagearray;
pagearray += n;
pagecount -= n;
- seg->lastpg = seg->pgs + n;
-
- /* init and free vm_pages (we've already zeroed them) */
- paddr = ctob(seg->start);
- for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
- seg->pgs[i].phys_addr = paddr;
-#ifdef __HAVE_VM_PAGE_MD
- VM_MDPAGE_INIT(&seg->pgs[i]);
-#endif
- if (atop(paddr) >= seg->avail_start &&
- atop(paddr) < seg->avail_end) {
- uvmexp.npages++;
- /* add page to free pool */
- uvm_pagefree(&seg->pgs[i]);
- }
- }
}
/*
@@ -625,92 +627,42 @@
static bool
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
- struct vm_physseg *seg;
- int lcv, x;
+ int lcv;
/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
- for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+ for (lcv = uvm_physmem_get_last() ; lcv >= uvm_physmem_get_first() ; lcv = uvm_physmem_get_prev(lcv))
#else
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ for (lcv = uvm_physmem_get_first() ; lcv <= uvm_physmem_get_last() ; lcv = uvm_physmem_get_next(lcv))
#endif
{
- seg = VM_PHYSMEM_PTR(lcv);
-
if (uvm.page_init_done == true)
panic("uvm_page_physget: called _after_ bootstrap");
- if (seg->free_list != freelist)
+ /* Try to match at front or back on unused segment */
+ if (uvm_page_physunload(lcv, freelist, paddrp) == false) {
+ if (paddrp == NULL) /* freelist fail, try next */
continue;
+ } else
+ return true;
- /* try from front */
- if (seg->avail_start == seg->start &&
- seg->avail_start < seg->avail_end) {
- *paddrp = ctob(seg->avail_start);
- seg->avail_start++;
- seg->start++;
- /* nothing left? nuke it */
- if (seg->avail_start == seg->end) {
- if (vm_nphysmem == 1)
- panic("uvm_page_physget: out of memory!");
- vm_nphysmem--;
- for (x = lcv ; x < vm_nphysmem ; x++)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- return (true);
- }
-
- /* try from rear */
- if (seg->avail_end == seg->end &&
- seg->avail_start < seg->avail_end) {
- *paddrp = ctob(seg->avail_end - 1);
- seg->avail_end--;
- seg->end--;
- /* nothing left? nuke it */
- if (seg->avail_end == seg->start) {
- if (vm_nphysmem == 1)
- panic("uvm_page_physget: out of memory!");
- vm_nphysmem--;
- for (x = lcv ; x < vm_nphysmem ; x++)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- return (true);
- }
- }
/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
- for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+ for (lcv = uvm_physmem_get_last() ; lcv >= uvm_physmem_get_first() ; lcv = uvm_physmem_get_prev(lcv))
#else
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ for (lcv = uvm_physmem_get_first() ; lcv <= uvm_physmem_get_last() ; lcv = uvm_physmem_get_next(lcv))
#endif
{
- seg = VM_PHYSMEM_PTR(lcv);
-
- /* any room in this bank? */
- if (seg->avail_start >= seg->avail_end)
- continue; /* nope */
-
- *paddrp = ctob(seg->avail_start);
- seg->avail_start++;
- /* truncate! */
- seg->start = seg->avail_start;
-
- /* nothing left? nuke it */
- if (seg->avail_start == seg->end) {
- if (vm_nphysmem == 1)
- panic("uvm_page_physget: out of memory!");
- vm_nphysmem--;
- for (x = lcv ; x < vm_nphysmem ; x++)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
+ /* Try the front regardless. */
+ if (uvm_page_physunload_force(lcv, freelist, paddrp) == false) {
+ if (paddrp == NULL) /* freelist fail, try next */
+ continue;
+ } else
+ return true;
}
- return (true);
}
-
- return (false); /* whoops! */
+ return false;
}
bool
@@ -727,228 +679,6 @@
#endif /* PMAP_STEAL_MEMORY */
/*
- * uvm_page_physload: load physical memory into VM system
- *
- * => all args are PFs
- * => all pages in start/end get vm_page structures
- * => areas marked by avail_start/avail_end get added to the free page pool
- * => we are limited to VM_PHYSSEG_MAX physical memory segments
- */
-
-void
-uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
- paddr_t avail_end, int free_list)
-{
- int preload, lcv;
- psize_t npages;
- struct vm_page *pgs;
- struct vm_physseg *ps;
-
- if (uvmexp.pagesize == 0)
- panic("uvm_page_physload: page size not set!");
- if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
- panic("uvm_page_physload: bad free list %d", free_list);
- if (start >= end)
- panic("uvm_page_physload: start >= end");
-
- /*
- * do we have room?
- */
-
- if (vm_nphysmem == VM_PHYSSEG_MAX) {
- printf("uvm_page_physload: unable to load physical memory "
- "segment\n");
- printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
- VM_PHYSSEG_MAX, (long long)start, (long long)end);
- printf("\tincrease VM_PHYSSEG_MAX\n");
- return;
- }
-
- /*
- * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
- * called yet, so kmem is not available).
- */
-
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
- if (VM_PHYSMEM_PTR(lcv)->pgs)
- break;
- }
- preload = (lcv == vm_nphysmem);
-
- /*
- * if VM is already running, attempt to kmem_alloc vm_page structures
- */
-
- if (!preload) {
- panic("uvm_page_physload: tried to add RAM after vm_mem_init");
- } else {
- pgs = NULL;
- npages = 0;
- }
-
- /*
- * now insert us in the proper place in vm_physmem[]
- */
-
-#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
- /* random: put it at the end (easy!) */
- ps = VM_PHYSMEM_PTR(vm_nphysmem);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
- {
- int x;
- /* sort by address for binary search */
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
- if (start < VM_PHYSMEM_PTR(lcv)->start)
- break;
- ps = VM_PHYSMEM_PTR(lcv);
- /* move back other entries, if necessary ... */
- for (x = vm_nphysmem ; x > lcv ; x--)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x - 1);
- }
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
- {
- int x;
- /* sort by largest segment first */
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
- if ((end - start) >
- (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
- break;
- ps = VM_PHYSMEM_PTR(lcv);
- /* move back other entries, if necessary ... */
- for (x = vm_nphysmem ; x > lcv ; x--)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x - 1);
- }
-#else
- panic("uvm_page_physload: unknown physseg strategy selected!");
-#endif
-
- ps->start = start;
- ps->end = end;
- ps->avail_start = avail_start;
- ps->avail_end = avail_end;
- if (preload) {
- ps->pgs = NULL;
- } else {
- ps->pgs = pgs;
- ps->lastpg = pgs + npages;
- }
- ps->free_list = free_list;
- vm_nphysmem++;
-
- if (!preload) {
- uvmpdpol_reinit();
- }
-}
-
-/*
- * when VM_PHYSSEG_MAX is 1, we can simplify these functions
- */
-
-#if VM_PHYSSEG_MAX == 1
-static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
-#else
-static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
-#endif
-
-/*
- * vm_physseg_find: find vm_physseg structure that belongs to a PA
- */
-int
-vm_physseg_find(paddr_t pframe, int *offp)
-{
-
-#if VM_PHYSSEG_MAX == 1
- return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
- return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
-#else
- return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
-#endif
-}
-
-#if VM_PHYSSEG_MAX == 1
-static inline int
-vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
-
- /* 'contig' case */
- if (pframe >= segs[0].start && pframe < segs[0].end) {
- if (offp)
- *offp = pframe - segs[0].start;
- return(0);
- }
- return(-1);
-}
-
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-
-static inline int
-vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
- /* binary search for it */
- u_int start, len, guess;
-
- /*
- * if try is too large (thus target is less than try) we reduce
- * the length to trunc(len/2) [i.e. everything smaller than "try"]
- *
- * if the try is too small (thus target is greater than try) then
- * we set the new start to be (try + 1). this means we need to
- * reduce the length to (round(len/2) - 1).
- *
- * note "adjust" below which takes advantage of the fact that
- * (round(len/2) - 1) == trunc((len - 1) / 2)
- * for any value of len we may have
- */
-
- for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
- guess = start + (len / 2); /* try in the middle */
-
- /* start past our try? */
- if (pframe >= segs[guess].start) {
- /* was try correct? */
- if (pframe < segs[guess].end) {
- if (offp)
- *offp = pframe - segs[guess].start;
- return guess; /* got it */
- }
- start = guess + 1; /* next time, start here */
- len--; /* "adjust" */
- } else {
- /*
- * pframe before try, just reduce length of
- * region, done in "for" loop
- */
- }
- }
- return(-1);
-}
-
-#else
-
-static inline int
-vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
- /* linear search for it */
- int lcv;
-
- for (lcv = 0; lcv < nsegs; lcv++) {
- if (pframe >= segs[lcv].start &&
- pframe < segs[lcv].end) {
- if (offp)
- *offp = pframe - segs[lcv].start;
- return(lcv); /* got it */
- }
- }
- return(-1);
-}
-#endif
-
-/*
* PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
* back from an I/O mapping (ugh!). used in some MD code as well.
*/
@@ -956,12 +686,12 @@
uvm_phys_to_vm_page(paddr_t pa)
{
paddr_t pf = atop(pa);
- int off;
+ paddr_t off;
int psi;
psi = vm_physseg_find(pf, &off);
if (psi != -1)
- return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
+ return uvm_physmem_get_pg(psi, off);
return(NULL);
}
@@ -2019,7 +1749,7 @@
lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
KASSERT(lcv != -1);
- return (VM_PHYSMEM_PTR(lcv)->free_list);
+ return uvm_physmem_get_free_list(lcv);
}
/*
@@ -2136,6 +1866,7 @@
uvm_page_printall(void (*pr)(const char *, ...))
{
unsigned i;
+ paddr_t pfn;
struct vm_page *pg;
(*pr)("%18s %4s %4s %18s %18s"
@@ -2143,8 +1874,14 @@
" OWNER"
#endif
"\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
- for (i = 0; i < vm_nphysmem; i++) {
- for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
+ for (i = uvm_physmem_get_first();
+ uvm_physmem_valid(i);
+ i = uvm_physmem_get_next(i)) {
+ for (pfn = uvm_physmem_get_start(i);
+ pfn <= uvm_physmem_get_end(i);
+ pfn++) {
+ pg = PHYS_TO_VM_PAGE(ptoa(pfn));
+
(*pr)("%18p %04x %04x %18p %18p",
pg, pg->flags, pg->pqflags, pg->uobject,
pg->uanon);
Index: sys/uvm/uvm_page.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.h,v
retrieving revision 1.80
diff -u -r1.80 uvm_page.h
--- sys/uvm/uvm_page.h 23 Mar 2015 07:59:12 -0000 1.80
+++ sys/uvm/uvm_page.h 28 Sep 2016 12:22:51 -0000
@@ -294,24 +294,6 @@
#define VM_PSTRAT_BSEARCH 2
#define VM_PSTRAT_BIGFIRST 3
-/*
- * vm_physseg: describes one segment of physical memory
- */
-struct vm_physseg {
- paddr_t start; /* PF# of first page in segment */
- paddr_t end; /* (PF# of last page in segment) + 1 */
- paddr_t avail_start; /* PF# of first free page in segment */
- paddr_t avail_end; /* (PF# of last free page in segment) +1 */
- struct vm_page *pgs; /* vm_page structures (from start) */
- struct vm_page *lastpg; /* vm_page structure for end */
- int free_list; /* which free list they belong on */
- u_int start_hint; /* start looking for free pages here */
- /* protected by uvm_fpageqlock */
-#ifdef __HAVE_PMAP_PHYSSEG
- struct pmap_physseg pmseg; /* pmap specific (MD) data */
-#endif
-};
-
#ifdef _KERNEL
/*
@@ -321,21 +303,6 @@
extern bool vm_page_zero_enable;
/*
- * physical memory config is stored in vm_physmem.
- */
-
-#define VM_PHYSMEM_PTR(i) (&vm_physmem[i])
-#if VM_PHYSSEG_MAX == 1
-#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
-#else
-#define VM_PHYSMEM_PTR_SWAP(i, j) \
- do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
-#endif
-
-extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
-extern int vm_nphysseg;
-
-/*
* prototypes: the following prototypes define the interface to pages
*/
@@ -366,10 +333,14 @@
int uvm_page_lookup_freelist(struct vm_page *);
-int vm_physseg_find(paddr_t, int *);
+int vm_physseg_find(paddr_t, psize_t *);
struct vm_page *uvm_phys_to_vm_page(paddr_t);
paddr_t uvm_vm_page_to_phys(const struct vm_page *);
+#if !defined(PMAP_STEAL_MEMORY)
+bool uvm_page_physget(paddr_t *);
+#endif
+
/*
* macros
*/
Index: sys/uvm/uvm_pglist.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pglist.c,v
retrieving revision 1.67
diff -u -r1.67 uvm_pglist.c
--- sys/uvm/uvm_pglist.c 26 Oct 2014 01:42:07 -0000 1.67
+++ sys/uvm/uvm_pglist.c 28 Sep 2016 12:22:52 -0000
@@ -116,11 +116,10 @@
}
static int
-uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
+uvm_pglistalloc_c_ps(int psi, int num, paddr_t low, paddr_t high,
paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
signed int candidate, limit, candidateidx, end, idx, skip;
- struct vm_page *pgs;
int pagemask;
bool second_pass;
#ifdef DEBUG
@@ -140,26 +139,26 @@
/*
* Make sure that physseg falls within with range to be allocated from.
*/
- if (high <= ps->avail_start || low >= ps->avail_end)
+ if (high <= uvm_physmem_get_avail_start(psi) || low >= uvm_physmem_get_avail_end(psi))
return 0;
/*
* We start our search at the just after where the last allocation
* succeeded.
*/
- candidate = roundup2(max(low, ps->avail_start + ps->start_hint), alignment);
- limit = min(high, ps->avail_end);
+ candidate = roundup2(max(low, uvm_physmem_get_avail_start(psi) +
+ uvm_physmem_get_start_hint(psi)), alignment);
+ limit = min(high, uvm_physmem_get_avail_end(psi));
pagemask = ~((boundary >> PAGE_SHIFT) - 1);
skip = 0;
second_pass = false;
- pgs = ps->pgs;
for (;;) {
bool ok = true;
signed int cnt;
if (candidate + num > limit) {
- if (ps->start_hint == 0 || second_pass) {
+ if (uvm_physmem_get_start_hint(psi) == 0 || second_pass) {
/*
* We've run past the allowable range.
*/
@@ -171,8 +170,9 @@
* is were we started.
*/
second_pass = true;
- candidate = roundup2(max(low, ps->avail_start), alignment);
- limit = min(limit, ps->avail_start + ps->start_hint);
+ candidate = roundup2(max(low, uvm_physmem_get_avail_start(psi)), alignment);
+ limit = min(limit, uvm_physmem_get_avail_start(psi) +
+ uvm_physmem_get_start_hint(psi));
skip = 0;
continue;
}
@@ -201,7 +201,7 @@
if (cidx != candidate - ps->start + num - 1)
panic("pgalloc contig: botch4");
#endif
- candidateidx = candidate - ps->start;
+ candidateidx = candidate - uvm_physmem_get_start(psi);
end = candidateidx + num;
/*
@@ -220,7 +220,7 @@
* testing most of those pages again in the next pass.
*/
for (idx = end - 1; idx >= candidateidx + skip; idx--) {
- if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
+ if (VM_PAGE_IS_FREE(uvm_physmem_get_pg(psi, idx)) == 0) {
ok = false;
break;
}
@@ -228,7 +228,7 @@
#ifdef DEBUG
if (idx > candidateidx) {
idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
- lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
+ lastidxpa = VM_PAGE_TO_PHYS(uvm_physmem_get_pg(psi, idx));
if ((lastidxpa + PAGE_SIZE) != idxpa) {
/*
* Region not contiguous.
@@ -249,7 +249,7 @@
if (ok) {
while (skip-- > 0) {
- KDASSERT(VM_PAGE_IS_FREE(&pgs[candidateidx + skip]));
+ KDASSERT(VM_PAGE_IS_FREE(uvm_physmem_get_pg(psi, candidateidx + skip)));
}
#ifdef PGALLOC_VERBOSE
printf(": ok\n");
@@ -280,19 +280,22 @@
/*
* we have a chunk of memory that conforms to the requested constraints.
*/
- for (idx = candidateidx, pgs += idx; idx < end; idx++, pgs++)
- uvm_pglist_add(pgs, rlist);
+ for (idx = candidateidx; idx < end; idx++)
+ uvm_pglist_add(uvm_physmem_get_pg(psi, idx), rlist);
/*
* the next time we need to search this segment, start after this
* chunk of pages we just allocated.
*/
- ps->start_hint = candidate + num - ps->avail_start;
- KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+ uvm_physmem_set_start_hint(psi, candidate + num -
+ uvm_physmem_get_avail_start(psi));
+ KASSERTMSG(uvm_physmem_get_start_hint(psi) <=
+ uvm_physmem_get_avail_end(psi) - uvm_physmem_get_avail_start(psi),
"%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
candidate + num,
- ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
- ps->avail_end - ps->avail_start);
+ uvm_physmem_get_start_hint(psi), uvm_physmem_get_start_hint(psi),
+ uvm_physmem_get_avail_end(psi), uvm_physmem_get_avail_start(psi),
+ uvm_physmem_get_avail_end(psi) - uvm_physmem_get_avail_start(psi));
#ifdef PGALLOC_VERBOSE
printf("got %d pgs\n", num);
@@ -305,7 +308,6 @@
paddr_t boundary, struct pglist *rlist)
{
int fl, psi;
- struct vm_physseg *ps;
int error;
/* Default to "lose". */
@@ -322,17 +324,16 @@
for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
- for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
+ for (psi = uvm_physmem_get_last(); uvm_physmem_valid(psi); psi = uvm_physmem_get_prev(psi))
+
#else
- for (psi = 0 ; psi < vm_nphysseg ; psi++)
+ for (psi = uvm_physmem_get_first(); uvm_physmem_valid(psi); psi = uvm_physmem_get_next(psi))
#endif
{
- ps = &vm_physmem[psi];
-
- if (ps->free_list != fl)
+ if (uvm_physmem_get_free_list(psi) != fl)
continue;
- num -= uvm_pglistalloc_c_ps(ps, num, low, high,
+ num -= uvm_pglistalloc_c_ps(psi, num, low, high,
alignment, boundary, rlist);
if (num == 0) {
#ifdef PGALLOC_VERBOSE
@@ -358,7 +359,7 @@
}
static int
-uvm_pglistalloc_s_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
+uvm_pglistalloc_s_ps(int psi, int num, paddr_t low, paddr_t high,
struct pglist *rlist)
{
int todo, limit, candidate;
@@ -369,36 +370,39 @@
#endif
KASSERT(mutex_owned(&uvm_fpageqlock));
- KASSERT(ps->start <= ps->avail_start);
- KASSERT(ps->start <= ps->avail_end);
- KASSERT(ps->avail_start <= ps->end);
- KASSERT(ps->avail_end <= ps->end);
+ KASSERT(uvm_physmem_get_start(psi) <= uvm_physmem_get_avail_start(psi));
+ KASSERT(uvm_physmem_get_start(psi) <= uvm_physmem_get_avail_end(psi));
+ KASSERT(uvm_physmem_get_avail_start(psi) <= uvm_physmem_get_end(psi));
+ KASSERT(uvm_physmem_get_avail_end(psi) <= uvm_physmem_get_end(psi));
low = atop(low);
high = atop(high);
todo = num;
- candidate = max(low, ps->avail_start + ps->start_hint);
- limit = min(high, ps->avail_end);
- pg = &ps->pgs[candidate - ps->start];
+ candidate = max(low, uvm_physmem_get_avail_start(psi) +
+ uvm_physmem_get_start_hint(psi));
+ limit = min(high, uvm_physmem_get_avail_end(psi));
+ pg = uvm_physmem_get_pg(psi, candidate - uvm_physmem_get_start(psi));
second_pass = false;
/*
* Make sure that physseg falls within with range to be allocated from.
*/
- if (high <= ps->avail_start || low >= ps->avail_end)
+ if (high <= uvm_physmem_get_avail_start(psi) ||
+ low >= uvm_physmem_get_avail_end(psi))
return 0;
again:
for (;; candidate++, pg++) {
if (candidate >= limit) {
- if (ps->start_hint == 0 || second_pass) {
+ if (uvm_physmem_get_start_hint(psi) == 0 || second_pass) {
candidate = limit - 1;
break;
}
second_pass = true;
- candidate = max(low, ps->avail_start);
- limit = min(limit, ps->avail_start + ps->start_hint);
- pg = &ps->pgs[candidate - ps->start];
+ candidate = max(low, uvm_physmem_get_avail_start(psi));
+ limit = min(limit, uvm_physmem_get_avail_start(psi) +
+ uvm_physmem_get_start_hint(psi));
+ pg = uvm_physmem_get_pg(psi, candidate - uvm_physmem_get_start(psi));
goto again;
}
#if defined(DEBUG)
@@ -426,12 +430,16 @@
* The next time we need to search this segment,
* start just after the pages we just allocated.
*/
- ps->start_hint = candidate + 1 - ps->avail_start;
- KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+ uvm_physmem_set_start_hint(psi, candidate + 1 - uvm_physmem_get_avail_start(psi));
+ KASSERTMSG(uvm_physmem_get_start_hint(psi) <= uvm_physmem_get_avail_end(psi) -
+ uvm_physmem_get_avail_start(psi),
"%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
candidate + 1,
- ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
- ps->avail_end - ps->avail_start);
+ uvm_physmem_get_start_hint(psi),
+ uvm_physmem_get_start_hint(psi),
+ uvm_physmem_get_avail_end(psi),
+ uvm_physmem_get_avail_start(psi),
+ uvm_physmem_get_avail_end(psi) - uvm_physmem_get_avail_start(psi));
#ifdef PGALLOC_VERBOSE
printf("got %d pgs\n", num - todo);
@@ -444,7 +452,6 @@
struct pglist *rlist, int waitok)
{
int fl, psi, error;
- struct vm_physseg *ps;
/* Default to "lose". */
error = ENOMEM;
@@ -461,17 +468,16 @@
for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
- for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
+ for (psi = uvm_physmem_get_last(); uvm_physmem_valid(psi); psi = uvm_physmem_get_prev(psi))
+
#else
- for (psi = 0 ; psi < vm_nphysseg ; psi++)
+ for (psi = uvm_physmem_get_first(); uvm_physmem_valid(psi); psi = uvm_physmem_get_next(psi))
#endif
{
- ps = &vm_physmem[psi];
-
- if (ps->free_list != fl)
+ if (uvm_physmem_get_free_list(psi) != fl)
continue;
- num -= uvm_pglistalloc_s_ps(ps, num, low, high, rlist);
+ num -= uvm_pglistalloc_s_ps(psi, num, low, high, rlist);
if (num == 0) {
error = 0;
goto out;
Index: sys/uvm/uvm_physmem.c
===================================================================
RCS file: sys/uvm/uvm_physmem.c
diff -N sys/uvm/uvm_physmem.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ sys/uvm/uvm_physmem.c 28 Sep 2016 12:22:53 -0000
@@ -0,0 +1,618 @@
+/* $NetBSD$ */
+
+/*
+ * Copyright (c) 1997 Charles D. Cranor and Washington University.
+ * Copyright (c) 1991, 1993, The Regents of the University of California.
+ *
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)vm_page.h 7.3 (Berkeley) 4/21/91
+ * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
+ *
+ *
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution%CS.CMU.EDU@localhost
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Consolidated API from uvm_page.c and others.
+ * Consolidated and designed by Cherry G. Mathew <cherry%zyx.in@localhost>
+ */
+
+#include "opt_uvm.h"
+
+#include <sys/types.h>
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_page.h>
+#include <uvm/uvm_pdpolicy.h>
+#include <uvm/uvm_physmem.h>
+
+/*
+ * physical memory config is stored in vm_physmem.
+ */
+
+#define VM_PHYSMEM_PTR(i) (&vm_physmem[i])
+#if VM_PHYSSEG_MAX == 1
+#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
+#else
+#define VM_PHYSMEM_PTR_SWAP(i, j) \
+ do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
+#endif
+
+/*
+ * vm_physseg: describes one segment of physical memory
+ */
+struct vm_physseg {
+ paddr_t start; /* PF# of first page in segment */
+ paddr_t end; /* (PF# of last page in segment) + 1 */
+ paddr_t avail_start; /* PF# of first free page in segment */
+ paddr_t avail_end; /* (PF# of last free page in segment) +1 */
+ struct vm_page *pgs; /* vm_page structures (from start) */
+ struct vm_page *lastpg; /* vm_page structure for end */
+ int free_list; /* which free list they belong on */
+ u_int start_hint; /* start looking for free pages here */
+ /* protected by uvm_fpageqlock */
+#ifdef __HAVE_PMAP_PHYSSEG
+ struct pmap_physseg pmseg; /* pmap specific (MD) data */
+#endif
+};
+
+
+static struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
+static int vm_nphysseg; /* XXXCDC: uvm.nphysseg */
+#define vm_nphysmem vm_nphysseg
+
+void
+uvm_physmem_init_seg(int lcv, struct vm_page *pgs)
+{
+ psize_t i;
+ psize_t n;
+ paddr_t paddr;
+ struct vm_physseg *seg;
+
+ KASSERT(lcv >= 0 && lcv < vm_nphysmem);
+
+ seg = VM_PHYSMEM_PTR(lcv);
+
+ KASSERT(seg->pgs == NULL);
+
+ n = seg->end - seg->start;
+ n = ((n + 1) << PAGE_SHIFT) /
+ (PAGE_SIZE + sizeof(struct vm_page));
+
+ seg->pgs = pgs;
+ seg->lastpg = seg->pgs + n;
+
+ /* init and free vm_pages (we've already zeroed them) */
+ paddr = ctob(seg->start);
+ for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
+ seg->pgs[i].phys_addr = paddr;
+#ifdef __HAVE_VM_PAGE_MD
+ VM_MDPAGE_INIT(&seg->pgs[i]);
+#endif
+ if (atop(paddr) >= seg->avail_start &&
+ atop(paddr) < seg->avail_end) {
+ uvmexp.npages++;
+ /* add page to free pool */
+ uvm_pagefree(&seg->pgs[i]);
+ }
+ }
+}
+
+/*
+ * Boot protocol dictates that these must be able to return partially
+ * initialised segments.
+ */
+paddr_t
+uvm_physmem_get_start(int lcv)
+{
+ if (uvm_physmem_valid(lcv) == false)
+ return (paddr_t) -1;
+
+ return VM_PHYSMEM_PTR(lcv)->start;
+}
+
+paddr_t
+uvm_physmem_get_end(int lcv)
+{
+ if (uvm_physmem_valid(lcv) == false)
+ return (paddr_t) -1;
+
+ return VM_PHYSMEM_PTR(lcv)->end;
+}
+
+paddr_t
+uvm_physmem_get_avail_start(int lcv)
+{
+ if (uvm_physmem_valid(lcv) == false)
+ return (paddr_t) -1;
+
+ return VM_PHYSMEM_PTR(lcv)->avail_start;
+}
+
+paddr_t
+uvm_physmem_get_avail_end(int lcv)
+{
+ if (uvm_physmem_valid(lcv) == false)
+ return (paddr_t) -1;
+
+ return VM_PHYSMEM_PTR(lcv)->avail_end;
+}
+
+struct vm_page *
+uvm_physmem_get_pg(int lcv, paddr_t idx)
+{
+ return &VM_PHYSMEM_PTR(lcv)->pgs[idx];
+}
+
+int
+uvm_physmem_get_free_list(int lcv)
+{
+ return VM_PHYSMEM_PTR(lcv)->free_list;
+}
+
+u_int
+uvm_physmem_get_start_hint(int lcv)
+{
+ return VM_PHYSMEM_PTR(lcv)->start_hint;
+}
+
+bool
+uvm_physmem_set_start_hint(int lcv, u_int start_hint)
+{
+ if (uvm_physmem_valid(lcv) == false)
+ return false;
+
+ VM_PHYSMEM_PTR(lcv)->start_hint = start_hint;
+ return true;
+}
+
+int
+uvm_physmem_get_next(int lcv)
+{
+ return (lcv + 1);
+}
+
+int
+uvm_physmem_get_prev(int lcv)
+{
+ return (lcv - 1);
+}
+
+int
+uvm_physmem_get_last(void)
+{
+ return (vm_nphysseg - 1);
+}
+
+int
+uvm_physmem_get_first(void)
+{
+ return 0;
+}
+
+bool
+uvm_physmem_valid(int lcv)
+{
+ struct vm_physseg *ps;
+
+ if (lcv < 0)
+ return false;
+
+ if (lcv >= vm_nphysseg)
+ return false;
+
+ /*
+	 * Part of the delicate boot-time init dance: until
+	 * uvm_page_init() is done, a bare segment entry counts as valid.
+ */
+ if (uvm.page_init_done != true)
+ return true;
+
+ ps = VM_PHYSMEM_PTR(lcv);
+
+ /* Extra checks needed only post uvm_page_init() */
+ if (ps->pgs == NULL)
+ return false;
+
+ if (ps->lastpg == NULL)
+ return false;
+
+ /* XXX: etc. */
+
+ return true;
+
+}
+
+
+paddr_t
+uvm_physmem_get_highest(void)
+{
+ int lcv;
+ paddr_t last = 0;
+ struct vm_physseg *ps;
+
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+ ps = VM_PHYSMEM_PTR(lcv);
+ if (last < ps->end)
+ last = ps->end;
+ }
+
+ return last;
+}
+
+#if !defined(PMAP_STEAL_MEMORY)
+
+/*
+ * uvm_page_physunload: unload physical memory and return it to
+ * caller.
+ */
+bool
+uvm_page_physunload(int psi, int freelist, paddr_t *paddrp)
+{
+ int x;
+ struct vm_physseg *seg;
+
+ seg = VM_PHYSMEM_PTR(psi);
+
+ if (seg->free_list != freelist) {
+ paddrp = NULL;
+ return false;
+ }
+
+ /* try from front */
+ if (seg->avail_start == seg->start &&
+ seg->avail_start < seg->avail_end) {
+ *paddrp = ctob(seg->avail_start);
+ seg->avail_start++;
+ /* nothing left? nuke it */
+ if (seg->avail_start == seg->end) {
+ if (vm_nphysmem == 1)
+ panic("uvm_page_physget: out of memory!");
+ vm_nphysmem--;
+ for (x = psi ; x < vm_nphysmem ; x++)
+ /* structure copy */
+ VM_PHYSMEM_PTR_SWAP(x, x + 1);
+ }
+ return (true);
+ }
+
+ /* try from rear */
+ if (seg->avail_end == seg->end &&
+ seg->avail_start < seg->avail_end) {
+ *paddrp = ctob(seg->avail_end - 1);
+ seg->avail_end--;
+ /* nothing left? nuke it */
+ if (seg->avail_end == seg->start) {
+ if (vm_nphysmem == 1)
+ panic("uvm_page_physget: out of memory!");
+ vm_nphysmem--;
+ for (x = psi ; x < vm_nphysmem ; x++)
+ /* structure copy */
+ VM_PHYSMEM_PTR_SWAP(x, x + 1);
+ }
+ return (true);
+ }
+
+ return false;
+}
+
+bool
+uvm_page_physunload_force(int psi, int freelist, paddr_t *paddrp)
+{
+ int x;
+ struct vm_physseg *seg;
+
+ seg = VM_PHYSMEM_PTR(psi);
+
+ /* any room in this bank? */
+ if (seg->avail_start >= seg->avail_end) {
+ paddrp = NULL;
+ return false; /* nope */
+ }
+
+ *paddrp = ctob(seg->avail_start);
+ seg->avail_start++;
+
+ /* nothing left? nuke it */
+ if (seg->avail_start == seg->end) {
+ if (vm_nphysmem == 1)
+ panic("uvm_page_physget: out of memory!");
+ vm_nphysmem--;
+ for (x = psi ; x < vm_nphysmem ; x++)
+ /* structure copy */
+ VM_PHYSMEM_PTR_SWAP(x, x + 1);
+ }
+ return (true);
+}
+
+#endif /* !defined(PMAP_STEAL_MEMORY) */
+
+/*
+ * uvm_page_physload: load physical memory into VM system
+ *
+ * => all args are PFs
+ * => all pages in start/end get vm_page structures
+ * => areas marked by avail_start/avail_end get added to the free page pool
+ * => we are limited to VM_PHYSSEG_MAX physical memory segments
+ */
+
+void
+uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
+ paddr_t avail_end, int free_list)
+{
+ int preload, lcv;
+ psize_t npages;
+ struct vm_page *pgs;
+ struct vm_physseg *ps;
+
+ if (uvmexp.pagesize == 0)
+ panic("uvm_page_physload: page size not set!");
+ if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
+ panic("uvm_page_physload: bad free list %d", free_list);
+ if (start >= end)
+ panic("uvm_page_physload: start >= end");
+
+ /*
+ * do we have room?
+ */
+
+ if (vm_nphysmem == VM_PHYSSEG_MAX) {
+ printf("uvm_page_physload: unable to load physical memory "
+ "segment\n");
+ printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
+ VM_PHYSSEG_MAX, (long long)start, (long long)end);
+ printf("\tincrease VM_PHYSSEG_MAX\n");
+ return;
+ }
+
+ /*
+ * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
+ * called yet, so kmem is not available).
+ */
+
+ for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
+ if (VM_PHYSMEM_PTR(lcv)->pgs)
+ break;
+ }
+ preload = (lcv == vm_nphysmem);
+
+ /*
+ * if VM is already running, attempt to kmem_alloc vm_page structures
+ */
+
+ if (!preload) {
+ npages = end - start;
+ pgs = kmem_zalloc(sizeof *pgs * npages, KM_SLEEP);
+ } else {
+ pgs = NULL;
+ npages = 0;
+ }
+
+ /*
+ * now insert us in the proper place in vm_physmem[]
+ */
+
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
+ /* random: put it at the end (easy!) */
+ ps = VM_PHYSMEM_PTR(vm_nphysmem);
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+ {
+ int x;
+ /* sort by address for binary search */
+ for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ if (start < VM_PHYSMEM_PTR(lcv)->start)
+ break;
+ ps = VM_PHYSMEM_PTR(lcv);
+ /* move back other entries, if necessary ... */
+ for (x = vm_nphysmem ; x > lcv ; x--)
+ /* structure copy */
+ VM_PHYSMEM_PTR_SWAP(x, x - 1);
+ }
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
+ {
+ int x;
+ /* sort by largest segment first */
+ for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ if ((end - start) >
+ (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
+ break;
+ ps = VM_PHYSMEM_PTR(lcv);
+ /* move back other entries, if necessary ... */
+ for (x = vm_nphysmem ; x > lcv ; x--)
+ /* structure copy */
+ VM_PHYSMEM_PTR_SWAP(x, x - 1);
+ }
+#else
+ panic("uvm_page_physload: unknown physseg strategy selected!");
+#endif
+
+ ps->start = start;
+ ps->end = end;
+ ps->avail_start = avail_start;
+ ps->avail_end = avail_end;
+
+ ps->pgs = pgs;
+ ps->lastpg = pgs + npages;
+
+ ps->free_list = free_list;
+ vm_nphysmem++;
+
+ if (!preload) {
+ paddr_t i;
+ paddr_t paddr;
+
+ /* init and free vm_pages (we've already zeroed them) */
+ paddr = ctob(ps->start);
+ for (i = 0 ; i < npages ; i++, paddr += PAGE_SIZE) {
+ ps->pgs[i].phys_addr = paddr;
+#ifdef __HAVE_VM_PAGE_MD
+ VM_MDPAGE_INIT(&ps->pgs[i]);
+#endif
+ if (atop(paddr) >= ps->avail_start &&
+ atop(paddr) < ps->avail_end) {
+ uvmexp.npages++;
+ /* add page to free pool */
+ uvm_pagefree(&ps->pgs[i]);
+ }
+ }
+
+ physmem += npages;
+
+ uvmpdpol_reinit();
+ }
+}
+
+/*
+ * when VM_PHYSSEG_MAX is 1, we can simplify these functions
+ */
+
+#if VM_PHYSSEG_MAX == 1
+static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, psize_t *);
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, psize_t *);
+#else
+static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, psize_t *);
+#endif
+
+/*
+ * vm_physseg_find: find vm_physseg structure that belongs to a PA
+ */
+int
+vm_physseg_find(paddr_t pframe, psize_t *offp)
+{
+
+#if VM_PHYSSEG_MAX == 1
+ return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+ return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
+#else
+ return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
+#endif
+}
+
+#if VM_PHYSSEG_MAX == 1
+static inline int
+vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
+{
+
+ /* 'contig' case */
+ if (pframe >= segs[0].start && pframe < segs[0].end) {
+ if (offp)
+ *offp = pframe - segs[0].start;
+ return(0);
+ }
+ return(-1);
+}
+
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+
+static inline int
+vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
+{
+ /* binary search for it */
+ u_int start, len, guess;
+
+ /*
+ * if try is too large (thus target is less than try) we reduce
+ * the length to trunc(len/2) [i.e. everything smaller than "try"]
+ *
+ * if the try is too small (thus target is greater than try) then
+ * we set the new start to be (try + 1). this means we need to
+ * reduce the length to (round(len/2) - 1).
+ *
+ * note "adjust" below which takes advantage of the fact that
+ * (round(len/2) - 1) == trunc((len - 1) / 2)
+ * for any value of len we may have
+ */
+
+ for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
+ guess = start + (len / 2); /* try in the middle */
+
+ /* start past our try? */
+ if (pframe >= segs[guess].start) {
+ /* was try correct? */
+ if (pframe < segs[guess].end) {
+ if (offp)
+ *offp = pframe - segs[guess].start;
+ return guess; /* got it */
+ }
+ start = guess + 1; /* next time, start here */
+ len--; /* "adjust" */
+ } else {
+ /*
+ * pframe before try, just reduce length of
+ * region, done in "for" loop
+ */
+ }
+ }
+ return(-1);
+}
+
+#else
+
+static inline int
+vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
+{
+ /* linear search for it */
+ int lcv;
+
+ for (lcv = 0; lcv < nsegs; lcv++) {
+ if (pframe >= segs[lcv].start &&
+ pframe < segs[lcv].end) {
+ if (offp)
+ *offp = pframe - segs[lcv].start;
+ return(lcv); /* got it */
+ }
+ }
+ return(-1);
+}
+#endif
Index: sys/uvm/uvm_physmem.h
===================================================================
RCS file: sys/uvm/uvm_physmem.h
diff -N sys/uvm/uvm_physmem.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ sys/uvm/uvm_physmem.h 28 Sep 2016 12:22:53 -0000
@@ -0,0 +1,66 @@
+/* $NetBSD$ */
+
+/*
+ * Consolidated API from uvm_page.c and others.
+ * Consolidated and designed by Cherry G. Mathew <cherry%zyx.in@localhost>
+ */
+
+#ifndef _UVM_UVM_PHYSMEM_H_
+#define _UVM_UVM_PHYSMEM_H_
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+
+/*
+ * We expect machine/uvm_physseg.h to include all API calls required
+ * to implement this API.
+ *
+ * No APIs are explicitly #included in uvm_physmem.c
+ */
+
+#include <machine/uvm_physmem.h>
+
+void uvm_physmem_init_seg(int, struct vm_page *);
+
+bool uvm_physmem_valid(int);
+
+/*
+ * Return start/end pfn of given segment
+ * Returns: -1 if the segment number is invalid
+ */
+paddr_t uvm_physmem_get_start(int);
+paddr_t uvm_physmem_get_end(int);
+
+paddr_t uvm_physmem_get_avail_start(int);
+paddr_t uvm_physmem_get_avail_end(int);
+
+struct vm_page * uvm_physmem_get_pg(int, paddr_t);
+
+int uvm_physmem_get_free_list(int);
+u_int uvm_physmem_get_start_hint(int);
+bool uvm_physmem_set_start_hint(int, u_int);
+
+/*
+ * Functions to help walk the list of segments.
+ * Returns: -1 if the segment number is invalid
+ */
+int uvm_physmem_get_next(int);
+int uvm_physmem_get_prev(int);
+int uvm_physmem_get_first(void);
+int uvm_physmem_get_last(void);
+
+
+/* Return the frame number of the highest registered physical page frame */
+paddr_t uvm_physmem_get_highest(void);
+
+/* Actually, uvm_page_physload takes PF#s which need their own type */
+void uvm_page_physload(paddr_t, paddr_t, paddr_t,
+ paddr_t, int);
+
+#if !defined(PMAP_STEAL_MEMORY)
+bool uvm_page_physunload(int, int, paddr_t *);
+bool uvm_page_physunload_force(int, int, paddr_t *);
+#endif /* !defined(PMAP_STEAL_MEMORY) */
+int vm_physseg_find(paddr_t, psize_t *);
+
+#endif /* _UVM_UVM_PHYSMEM_H_ */