Source-Changes-HG archive


[src/trunk]: src/sys/arch/sparc/sparc Change `pv list' management to use the ...



details:   https://anonhg.NetBSD.org/src/rev/128235e04dc1
branches:  trunk
changeset: 504394:128235e04dc1
user:      pk <pk%NetBSD.org@localhost>
date:      Thu Mar 01 15:52:18 2001 +0000

description:
Change `pv list' management to use the hooks provided in the `vm_physmem'
structure. While this comes with the cost of having to search the
`vm_physmem' array every time we need to find a PV entry corresponding to
some physical address, we gain the flexibility needed to support
arbitrary non-contiguous ranges of physical memory addresses.

Also, eliminate the need to sort the memory address ranges as presented
by the machine's PROM, and the requirement that physical memory starts
at address 0 (when possible).
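
To make the new lookup path concrete, here is a minimal sketch of the
pvhead() helper this change introduces (condensed from the diff below;
vm_physseg_find() and the `pmseg.pvhead' hook are used as they appear
in the patch, with the DIAGNOSTIC check omitted):

	static struct pvlist *
	pvhead(paddr_t pfn)
	{
		int bank, off;

		/*
		 * Search the vm_physmem segment array for the segment
		 * containing this page frame; this linear search is the
		 * per-lookup cost mentioned above.
		 */
		bank = vm_physseg_find(pfn, &off);
		if (bank == -1)
			return (NULL);	/* not a managed page */

		/*
		 * Each segment carries its own pv list array in the
		 * machine-dependent `pmseg' hook of `vm_physmem'.
		 */
		return (&vm_physmem[bank].pmseg.pvhead[off]);
	}

A NULL return replaces the old managed() test, so callers check the
returned pointer instead of guarding the lookup with a separate
predicate.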

diffstat:

 sys/arch/sparc/sparc/pmap.c |  677 +++++++++++++++++++------------------------
 1 files changed, 302 insertions(+), 375 deletions(-)

diffs (truncated from 1313 to 300 lines):

diff -r 32e4329dd018 -r 128235e04dc1 sys/arch/sparc/sparc/pmap.c
--- a/sys/arch/sparc/sparc/pmap.c       Thu Mar 01 15:13:31 2001 +0000
+++ b/sys/arch/sparc/sparc/pmap.c       Thu Mar 01 15:52:18 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.180 2001/02/16 23:00:11 pk Exp $ */
+/*     $NetBSD: pmap.c,v 1.181 2001/03/01 15:52:18 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -155,9 +155,12 @@
 #endif
 
 /*
- * First and last managed physical addresses.
+ * Bounds on managed physical addresses. Used by (MD) users
+ * of uvm_pglistalloc() to provide search hints.
  */
-paddr_t        vm_first_phys, vm_num_phys;
+paddr_t        vm_first_phys = (paddr_t)-1;
+paddr_t        vm_last_phys = 0;
+psize_t vm_num_phys;
 
 /*
  * For each managed physical page, there is a list of all currently
@@ -196,19 +199,27 @@
 #define PV_C4M         4       /* page _can_ be cached (SRMMU) */
 #define PV_ANC         0x10    /* page has incongruent aliases */
 
-struct pvlist *pv_table;       /* array of entries, one per physical page */
-
-#define pvhead(pa)     (&pv_table[(pa) >> PGSHIFT])
-
-static psize_t pv_table_map __P((int));
-/*
- * Physical memory to map pv_table[] (allocated in pmap_bootstrap()
- * and used in pv_table_map()).
- */
-static paddr_t pv_table_phys_storage;
-
 static struct pool pv_pool;
 
+static int pmap_initialized;   /* XXX - allow pmap_enter() before the
+                                *       pv tables are allocated.
+                                */
+
+static struct pvlist *pvhead(paddr_t pfn)
+{
+       int bank, off;
+
+#ifdef DIAGNOSTIC
+       if (pmap_initialized == 0)
+               panic("pvhead: not initialized");
+#endif
+
+       bank = vm_physseg_find(pfn, &off);
+       if (bank == -1)
+               return (NULL);
+
+       return (&vm_physmem[bank].pmseg.pvhead[off]);
+}
 
 /*
  * Each virtual segment within each pmap is either valid or invalid.
@@ -349,17 +360,15 @@
 #define        MA_SIZE 32              /* size of memory descriptor arrays */
 struct memarr pmemarr[MA_SIZE];/* physical memory regions */
 int    npmemarr;               /* number of entries in pmemarr */
-/*static*/ paddr_t     avail_start;    /* first free physical page */
-/*static*/ paddr_t     avail_end;      /* last free physical page */
-/*static*/ vaddr_t     virtual_avail;  /* first free virtual page number */
-/*static*/ vaddr_t     virtual_end;    /* last free virtual page number */
-/*static*/ vaddr_t     etext_gap_start;/* start of gap between text & data */
-/*static*/ vaddr_t     etext_gap_end;  /* end of gap between text & data */
-/*static*/ paddr_t     etext_gap_start_pa;/* gap start, physical */
-/*static*/ paddr_t     etext_gap_end_pa;/* gap start, physical */
+
+static paddr_t avail_start;    /* first available physical page, other
+                                  than the `etext gap' defined below */
+static vaddr_t etext_gap_start;/* start of gap between text & data */
+static vaddr_t etext_gap_end;  /* end of gap between text & data */
+static vaddr_t virtual_avail;  /* first free kernel virtual address */
+static vaddr_t virtual_end;    /* last free kernel virtual address */
 
 static void pmap_page_upload __P((void));
-void pmap_release __P((pmap_t));
 
 int mmu_has_hole;
 
@@ -696,7 +705,6 @@
 
 
 static void get_phys_mem __P((void));
-static void sortm __P((struct memarr *, int));
 void   pv_flushcache __P((struct pvlist *));
 void   kvm_iocache __P((caddr_t, int));
 
@@ -706,12 +714,23 @@
 void   pm_check_u __P((char *, struct pmap *));
 #endif
 
+/*
+ * During the PMAP bootstrap, we can use a simple translation to map a
+ * kernel virtual address to a physical memory address (this is arranged
+ * in locore).  Usually, KERNBASE maps to physical address 0. This is always
+ * the case on sun4 and sun4c machines. On sun4m machines -- if no memory is
+ * installed in the bank corresponding to physical address 0 -- the PROM may
+ * elect to load us at some other address, presumably at the start of
+ * the first memory bank that is available. We set up the variable
+ * `va2pa_offset' to hold the physical address corresponding to KERNBASE.
+ */
+
+static u_long va2pa_offset = KERNBASE;
+#define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset))
 
 /*
- * Grab physical memory list and use it to compute `physmem' and
- * `avail_end'. The latter is used in conjunction with
- * `avail_start' to dispatch left-over physical pages to the
- * VM system.
+ * Grab physical memory list.
+ * While here, compute `physmem'.
  */
 void
 get_phys_mem()
@@ -720,43 +739,11 @@
        int i;
 
        npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
-       sortm(pmemarr, npmemarr);
-       if (pmemarr[0].addr != 0)
-               panic("pmap_bootstrap: no memory?!");
-
-       avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
+
        for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++)
                physmem += btoc(mp->len);
 }
 
-/*
- * Sort a memory array by address.
- */
-static void
-sortm(mp, n)
-       struct memarr *mp;
-       int n;
-{
-       struct memarr *mpj;
-       int i, j;
-       paddr_t addr;
-       psize_t len;
-
-       /* Insertion sort.  This is O(n^2), but so what? */
-       for (i = 1; i < n; i++) {
-               /* save i'th entry */
-               addr = mp[i].addr;
-               len = mp[i].len;
-               /* find j such that i'th entry goes before j'th */
-               for (j = 0, mpj = mp; j < i; j++, mpj++)
-                       if (addr < mpj->addr)
-                               break;
-               /* slide up any additional entries */
-               memmove(mpj + 1, mpj, (i - j) * sizeof(*mp));
-               mpj->addr = addr;
-               mpj->len = len;
-       }
-}
 
 /*
  * Support functions for vm_page_bootstrap().
@@ -781,35 +768,50 @@
 static void
 pmap_page_upload()
 {
-       int     n = 0;
+       int     n;
        paddr_t start, end;
 
+       /* First, the `etext gap' */
+       start = PMAP_BOOTSTRAP_VA2PA(etext_gap_start);
+       end = PMAP_BOOTSTRAP_VA2PA(etext_gap_end);
 #ifdef DIAGNOSTIC
-       if (avail_start <= etext_gap_end_pa)
+       if (avail_start <= start)
                panic("pmap_page_upload: etext gap overlap: %lx < %lx",
-                       (u_long)avail_start, (u_long)etext_gap_end_pa);
-#endif
-
-#if 0 /* not yet, managed() et.al. cannot deal with this */
-       /* First, the `etext gap' */
-       if (etext_gap_start_pa < etext_gap_end_pa)
+                       (u_long)avail_start, (u_long)start);
+#endif
+       if (etext_gap_start < etext_gap_end) {
+               vm_first_phys = start;
                uvm_page_physload(
-                       atop(etext_gap_start_pa),
-                       atop(etext_gap_end_pa),
-                       atop(etext_gap_start_pa),
-                       atop(etext_gap_end_pa), VM_FREELIST_DEFAULT);
-#endif
+                       atop(start),
+                       atop(end),
+                       atop(start),
+                       atop(end), VM_FREELIST_DEFAULT);
+       }
 
        for (n = 0; n < npmemarr; n++) {
+
+               start = pmemarr[n].addr;
+               end = start + pmemarr[n].len;
+
                /*
-                * Assume `avail_start' is always in the first segment; we
-                * already made that assumption in pmap_bootstrap()..
+                * If this segment contains `avail_start', we must exclude
+                * the range of initial kernel memory as computed by
+                * pmap_bootstrap(). Note that this will also exclude
+                * the `etext gap' range already uploaded above.
                 */
-               start = (n == 0) ? avail_start : pmemarr[n].addr;
-               end = pmemarr[n].addr + pmemarr[n].len;
+               if (start <= avail_start && avail_start < end)
+                       start = avail_start;
+
                if (start == end)
                        continue;
 
+               /* Update vm_{first,last}_phys */
+               if (vm_first_phys > start)
+                       vm_first_phys = start;
+
+               if (vm_last_phys < end)
+                       vm_last_phys = end;
+
                uvm_page_physload(
                        atop(start),
                        atop(end),
@@ -818,6 +820,9 @@
        }
 }
 
+/*
+ * This routine is used by mmrw() to validate access to `/dev/mem'.
+ */
 int
 pmap_pa_exists(pa)
        paddr_t pa;
@@ -1007,9 +1012,6 @@
                      "(How are we running?");
                break;
        case SRMMU_TEPTE:
-#ifdef DEBUG
-               printf("mmu_reservemon4m: trying to remap 4G segment!\n");
-#endif
                panic("mmu_reservemon4m: can't handle ROM 4G page size");
                /* XXX: Should make this work, however stupid it is */
                break;
@@ -1218,7 +1220,7 @@
 {
        struct mmuentry *me;
        struct pmap *pm;
-       int i, va, pa, *pte, tpte;
+       int i, va, *pte, tpte;
        int ctx;
        struct regmap *rp;
        struct segmap *sp;
@@ -1326,9 +1328,10 @@
        do {
                tpte = getpte4(va);
                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
-                       pa = ptoa(tpte & PG_PFNUM);
-                       if (managed(pa))
-                               pvhead(pa)->pv_flags |= MR4_4C(tpte);
+                       u_int pfn = tpte & PG_PFNUM;
+                       struct pvlist *pv;
+                       if ((pv = pvhead(pfn)) != NULL)
+                               pv->pv_flags |= MR4_4C(tpte);
                }
                *pte++ = tpte & ~(PG_U|PG_M);
                va += NBPG;
@@ -1373,7 +1376,7 @@
        u_int pmeg;
 {
        struct mmuentry *me = &mmusegments[pmeg];
-       int i, va, pa, tpte;
+       int i, va, tpte;
        int vr;
        struct regmap *rp;
 
@@ -1407,9 +1410,10 @@
        do {
                tpte = getpte4(va);
                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
-                       pa = ptoa(tpte & PG_PFNUM);
-                       if (managed(pa))
-                               pvhead(pa)->pv_flags |= MR4_4C(tpte);
+                       u_int pfn = tpte & PG_PFNUM;
+                       struct pvlist *pv;
+                       if ((pv = pvhead(pfn)) != NULL)
+                               pv->pv_flags |= MR4_4C(tpte);


