Source-Changes-HG archive


[src/trunk]: src/sys/arch/vax/vax Rewrite the page table entry routines. Don'...



details:   https://anonhg.NetBSD.org/src/rev/59c509e062b1
branches:  trunk
changeset: 474542:59c509e062b1
user:      ragge <ragge%NetBSD.org@localhost>
date:      Sat Jul 10 22:04:59 1999 +0000

description:
Rewrite the page table entry routines. Don't take a PTE-invalid fault for
missing PTEs; instead, map in PTE entries in pmap_enter() (a condensed
sketch of this step appears after the diff). The user PTEs are no longer
handled by the VM system. All of this made swapping start working on VAX
again.
Still to do:
- Keep a refcount per PTE page, so that those pages get freed when the
  process is swapped out; right now they are only freed when the pmap
  is destroyed. (A sketch of one possible approach follows this list.)
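
A minimal sketch of what that per-page refcounting could look like (the
struct, fields, and free_pte_page() helper are hypothetical illustrations,
not part of this changeset or of the NetBSD/vax pmap):

/*
 * Hypothetical sketch: count the live PTEs on each page of the user
 * page table, so the page can be released at swapout time instead of
 * lingering until pmap_release().
 */
struct ptpage_ref {
	int	ppr_count;		/* valid PTEs on this PTE page */
};

static void
ptpage_addref(struct ptpage_ref *ppr)
{
	ppr->ppr_count++;		/* a PTE was entered on this page */
}

static void
ptpage_delref(struct ptpage_ref *ppr, vaddr_t ptaddr)
{
	if (--ppr->ppr_count == 0)
		free_pte_page(ptaddr);	/* hypothetical helper: unmap the
					 * PTE page and uvm_pagefree() it */
}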

Many thanks to Chuck Silvers for all the help finding the deadlock problems.

diffstat:

 sys/arch/vax/vax/pmap.c |  208 ++++++++++++++++++++++++++++++++++-------------
 sys/arch/vax/vax/trap.c |   33 ++-----
 2 files changed, 159 insertions(+), 82 deletions(-)
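
The core bookkeeping change is that the pte_map kernel submap is gone:
user page-table space is now handed out by an extent(9) map whose
descriptor storage is allocated statically. Condensed from the hunks
below (excerpts from the patch, not a compilable unit on its own):

#include <sys/extent.h>

#define	PTMAPSZ	EXTENT_FIXED_STORAGE_SIZE(100)
char	ptmapstorage[PTMAPSZ];
struct extent *ptemap;

/* pmap_init(): describe the virtual range reserved for user PTEs.
 * M_HTABLE is the malloc type the commit uses (flagged XXX bogus there). */
ptemap = extent_create("ptemap", ptemapstart, ptemapend,
    M_HTABLE, ptmapstorage, PTMAPSZ, EX_NOCOALESCE);

/* pmap_pinit(): carve out one pmap's page-table space, sleeping for
 * room (EX_WAITSPACE) instead of failing when the map is full. */
res = extent_alloc(ptemap, USRPTSIZE * sizeof(struct pte), 4, 0,
    EX_WAITSPACE | EX_WAITOK, (u_long *)&pmap->pm_p0br);

/* pmap_release(): return the range once its PTE pages are freed. */
extent_free(ptemap, (u_long)pmap->pm_p0br,
    USRPTSIZE * sizeof(struct pte), EX_WAITOK);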

diffs (truncated from 525 to 300 lines):

diff -r 326030dcadb9 -r 59c509e062b1 sys/arch/vax/vax/pmap.c
--- a/sys/arch/vax/vax/pmap.c   Sat Jul 10 21:55:17 1999 +0000
+++ b/sys/arch/vax/vax/pmap.c   Sat Jul 10 22:04:59 1999 +0000
@@ -1,6 +1,6 @@
-/*     $NetBSD: pmap.c,v 1.68 1999/07/08 18:11:02 thorpej Exp $           */
+/*     $NetBSD: pmap.c,v 1.69 1999/07/10 22:04:59 ragge Exp $     */
 /*
- * Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
+ * Copyright (c) 1994, 1998, 1999 Ludd, University of Lule}, Sweden.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -29,10 +29,13 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include "opt_ddb.h"
+
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/queue.h>
 #include <sys/malloc.h>
+#include <sys/extent.h>
 #include <sys/proc.h>
 #include <sys/user.h>
 #include <sys/systm.h>
@@ -82,12 +85,26 @@
 vaddr_t        iospace;
 
 vaddr_t ptemapstart, ptemapend;
-vm_map_t pte_map;
-struct vm_map  pte_map_store;
+struct extent *ptemap;
+#define        PTMAPSZ EXTENT_FIXED_STORAGE_SIZE(100)
+char   ptmapstorage[PTMAPSZ];
 
 extern caddr_t msgbufaddr;
 
 #ifdef PMAPDEBUG
+volatile int recurse;
+#define RECURSESTART {                                                 \
+       if (recurse)                                                    \
+               printf("enter at %d, previous %d\n", __LINE__, recurse);\
+       recurse = __LINE__;                                             \
+}
+#define RECURSEEND {recurse = 0; }
+#else
+#define RECURSESTART
+#define RECURSEEND
+#endif
+
+#ifdef PMAPDEBUG
 int    startpmapdebug = 0;
 #endif
 
@@ -195,6 +212,10 @@
        qdearly();
 #endif
 
+       /* User page table map. This is big. */
+       MAPVIRT(ptemapstart, USRPTSIZE);
+       ptemapend = virtual_avail;
+
        MAPVIRT(iospace, IOSPSZ); /* Device iospace mapping area */
 
        /* Init SCB and set up stray vectors. */
@@ -214,9 +235,11 @@
        printf("Sysmap %p, istack %lx, scratch %p\n",Sysmap,istack,scratch);
        printf("etext %p\n", &etext);
        printf("SYSPTSIZE %x\n",sysptsize);
-       printf("pv_table %p, \n", pv_table);
+       printf("pv_table %p, ptemapstart %lx ptemapend %lx\n",
+           pv_table, ptemapstart, ptemapend);
        printf("avail_start %lx, avail_end %lx\n",avail_start,avail_end);
-       printf("virtual_avail %lx,virtual_end %lx\n",virtual_avail,virtual_end);
+       printf("virtual_avail %lx,virtual_end %lx\n",
+           virtual_avail, virtual_end);
        printf("startpmapdebug %p\n",&startpmapdebug);
 #endif
 
@@ -247,7 +270,6 @@
        mtpr(1, PR_MAPEN);
 }
 
-#ifdef PMAP_STEAL_MEMORY
 /*
  * Let the VM system do early memory allocation from the direct-mapped
  * physical memory instead.
@@ -286,32 +308,23 @@
        bzero((caddr_t)v, size);
        return v;
 }
-#else
-/*
- * How much virtual space does this kernel have?
- * (After mapping kernel text, data, etc.)
- */
-void
-pmap_virtual_space(v_start, v_end)
-       vaddr_t *v_start;
-       vaddr_t *v_end;
-{
-       *v_start = virtual_avail;
-       *v_end   = virtual_end;
-}
-#endif
 
 /*
  * pmap_init() is called as part of vm init after memory management
  * is enabled. It is meant to do machine-specific allocations.
- * Here we allocate virtual memory for user page tables.
+ * Here the resource map for the user page tables is initialized.
  */
 void 
 pmap_init() 
 {
-       /* reserve place on SPT for UPT */
-       pte_map = uvm_km_suballoc(kernel_map, &ptemapstart, &ptemapend, 
-           USRPTSIZE * 4 * maxproc, TRUE, FALSE, &pte_map_store);
+        /*
+         * Create the extent map used to manage the page table space.
+        * XXX - M_HTABLE is bogus.
+         */
+        ptemap = extent_create("ptemap", ptemapstart, ptemapend,
+            M_HTABLE, ptmapstorage, PTMAPSZ, EX_NOCOALESCE);
+        if (ptemap == NULL)
+               panic("pmap_init");
 }
 
 
@@ -356,14 +369,17 @@
 pmap_pinit(pmap)
        pmap_t pmap;
 {
-       int bytesiz;
+       int bytesiz, res;
 
        /*
         * Allocate PTEs and stash them away in the pmap.
         * XXX Ok to use kmem_alloc_wait() here?
         */
        bytesiz = USRPTSIZE * sizeof(struct pte);
-       pmap->pm_p0br = (void *)uvm_km_valloc_wait(pte_map, bytesiz);
+       res = extent_alloc(ptemap, bytesiz, 4, 0, EX_WAITSPACE|EX_WAITOK,
+           (u_long *)&pmap->pm_p0br);
+       if (res)
+               panic("pmap_pinit");
        pmap->pm_p0lr = vax_btoc(MAXTSIZ + MAXDSIZ + MMAPSPACE) | AST_PCB;
        (vaddr_t)pmap->pm_p1br = (vaddr_t)pmap->pm_p0br + bytesiz - 0x800000;
        pmap->pm_p1lr = (0x200000 - vax_btoc(MAXSSIZ));
@@ -388,16 +404,30 @@
 pmap_release(pmap)
        struct pmap *pmap;
 {
+       vaddr_t saddr, eaddr;
+       paddr_t paddr;
+
 #ifdef PMAPDEBUG
 if(startpmapdebug)printf("pmap_release: pmap %p\n",pmap);
 #endif
 
-       if (pmap->pm_p0br)
-               uvm_km_free_wakeup(pte_map, (vaddr_t)pmap->pm_p0br, 
-                   USRPTSIZE * sizeof(struct pte));
+       if (pmap->pm_p0br == 0)
+               return;
+
+       saddr = (vaddr_t)pmap->pm_p0br;
+       eaddr = saddr + USRPTSIZE * sizeof(struct pte);
+       for (; saddr < eaddr; saddr += NBPG) {
+               paddr = (kvtopte(saddr)->pg_pfn << VAX_PGSHIFT);
+               if (paddr == 0)
+                       continue; /* page not mapped */
+               bzero(kvtopte(saddr), sizeof(struct pte) * 8); /* XXX */
+               uvm_pagefree(PHYS_TO_VM_PAGE(paddr));
+       }
+       extent_free(ptemap, (u_long)pmap->pm_p0br,
+           USRPTSIZE * sizeof(struct pte), EX_WAITOK);
+       mtpr(0, PR_TBIA);
 }
 
-
 /*
  * pmap_destroy(pmap): Remove a reference from the pmap. 
 * If the pmap is NULL then just return else decrease pm_count.
@@ -444,6 +474,7 @@
        printf("rensa: pv %p clp 0x%x ptp %p\n", pv, clp, ptp);
 #endif
        s = splimp();
+       RECURSESTART;
        if (pv->pv_pte == ptp) {
                g = (int *)pv->pv_pte;
                if ((pv->pv_attr & (PG_V|PG_M)) == 0)
@@ -452,6 +483,7 @@
                pv->pv_pmap->pm_stats.resident_count--;
                pv->pv_pmap = 0;
                splx(s);
+               RECURSEEND;
                return;
        }
        for (pl = pv; pl->pv_next; pl = pl->pv_next) {
@@ -465,6 +497,7 @@
                        pf->pv_pmap->pm_stats.resident_count--;
                        FREE(pf, M_VMPVENT);
                        splx(s);
+                       RECURSEEND;
                        return;
                }
        }
@@ -592,33 +625,67 @@
        if (pmap == 0)
                return;
 
+       RECURSESTART;
        /* Find address of correct pte */
        if (v & KERNBASE) {
                patch = (int *)Sysmap;
                i = (v - KERNBASE) >> VAX_PGSHIFT;
                newpte = (p>>VAX_PGSHIFT)|(prot&VM_PROT_WRITE?PG_KW:PG_KR);
-       } else if (v < 0x40000000) {
-               patch = (int *)pmap->pm_p0br;
-               i = (v >> VAX_PGSHIFT);
-               if (i >= (pmap->pm_p0lr & ~AST_MASK))
-                       panic("P0 too small in pmap_enter");
-               patch = (int *)pmap->pm_p0br;
-               newpte = (p>>VAX_PGSHIFT)|(prot&VM_PROT_WRITE?PG_RW:PG_RO);
        } else {
-               patch = (int *)pmap->pm_p1br;
-               i = (v - 0x40000000) >> VAX_PGSHIFT;
-               if (i < pmap->pm_p1lr)
-                       panic("pmap_enter: must expand P1");
-               if (v < pmap->pm_stack)
-                       pmap->pm_stack = v;
-               newpte = (p>>VAX_PGSHIFT)|(prot&VM_PROT_WRITE?PG_RW:PG_RO);
+               if (v < 0x40000000) {
+                       patch = (int *)pmap->pm_p0br;
+                       i = (v >> VAX_PGSHIFT);
+                       if (i >= (pmap->pm_p0lr & ~AST_MASK))
+                               panic("P0 too small in pmap_enter");
+                       patch = (int *)pmap->pm_p0br;
+                       newpte = (p >> VAX_PGSHIFT) |
+                           (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
+               } else {
+                       patch = (int *)pmap->pm_p1br;
+                       i = (v - 0x40000000) >> VAX_PGSHIFT;
+                       if (i < pmap->pm_p1lr)
+                               panic("pmap_enter: must expand P1");
+                       if (v < pmap->pm_stack)
+                               pmap->pm_stack = v;
+                       newpte = (p >> VAX_PGSHIFT) |
+                           (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
+               }
+               /* Check for PTE page */
+               if (kvtopte(&patch[i])->pg_pfn == 0) {
+                       vaddr_t ptaddr = trunc_page(&patch[i]);
+                       paddr_t phys;
+                       struct vm_page *pg;
+
+                       pg = uvm_pagealloc(NULL, 0, NULL, 0);
+                       if (pg == NULL)
+                               panic("pmap_ptefault"); /* XXX */
+                       phys = VM_PAGE_TO_PHYS(pg);
+                       bzero((caddr_t)(phys|KERNBASE), NBPG);
+                       pmap_kenter_pa(ptaddr, phys,
+                           VM_PROT_READ|VM_PROT_WRITE);
+               }
        }
 
+#if 0
+       /*
+        * Map in a pte page, if needed.
+        */
+       if (kvtopte(&patch[i])->pg_pfn == 0) {
+               int g;
+
+               g = pmap_ptefault(pmap, v);
+               if (g)
+                       panic("pmap_enter: %d\n", g);
+       }
+#endif
+
        oldpte = patch[i] & ~(PG_V|PG_M);
 
        /* No mapping change. Can this happen??? */
-       if (newpte == oldpte)
+       if (newpte == oldpte) {
+               RECURSEEND;
                return;
+       }
 
        pv = pv_table + (p >> PGSHIFT);
 
@@ -631,8 +698,10 @@
                 * This can be done more efficiently than pmap_page_protect().
                 */
                if (oldpte) {
+                       RECURSEEND;
                        pmap_page_protect(PHYS_TO_VM_PAGE((oldpte
                            << VAX_PGSHIFT)), 0);
+                       RECURSESTART;



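Although the diff is truncated, the key new step in pmap_enter() is
already visible above: before writing a user PTE, check whether the
kernel page holding that PTE slot is itself mapped, and allocate and
map one on the spot if not. Condensed from the hunk above (a sketch,
not the verbatim kernel code):

/* patch points at the P0/P1 page-table base and i indexes the PTE
 * for virtual address v; if the page holding slot &patch[i] has no
 * backing frame yet, supply one now instead of faulting later. */
if (kvtopte(&patch[i])->pg_pfn == 0) {
	vaddr_t ptaddr = trunc_page(&patch[i]);
	struct vm_page *pg;
	paddr_t phys;

	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		panic("pmap_enter: no page for PTEs");	/* XXX */
	phys = VM_PAGE_TO_PHYS(pg);
	bzero((caddr_t)(phys | KERNBASE), NBPG);	/* all PTEs invalid */
	pmap_kenter_pa(ptaddr, phys, VM_PROT_READ | VM_PROT_WRITE);
}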