Source-Changes-HG archive


[src/trunk]: src/sys/arch/sparc sun4/sun4c MMU: keep `wired' status per page, ...



details:   https://anonhg.NetBSD.org/src/rev/7e176a17a032
branches:  trunk
changeset: 550666:7e176a17a032
user:      pk <pk%NetBSD.org@localhost>
date:      Sat Aug 16 19:21:21 2003 +0000

description:
sun4/sun4c MMU: keep the `wired' status per page, implemented with a bit in
the PTE word that is not used by the hardware. Use it to unlock a `pmeg'
when the wired count in a segment drops to zero.

diffstat:

 sys/arch/sparc/include/pmap.h |    3 +-
 sys/arch/sparc/sparc/pmap.c   |  511 ++++++++++++++++++++++++++++-------------
 2 files changed, 351 insertions(+), 163 deletions(-)

diffs (truncated from 1053 to 300 lines):

diff -r 79b5c24b9421 -r 7e176a17a032 sys/arch/sparc/include/pmap.h
--- a/sys/arch/sparc/include/pmap.h     Sat Aug 16 18:42:53 2003 +0000
+++ b/sys/arch/sparc/include/pmap.h     Sat Aug 16 19:21:21 2003 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.69 2003/08/12 15:13:13 pk Exp $ */
+/*     $NetBSD: pmap.h,v 1.70 2003/08/16 19:21:23 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -174,6 +174,7 @@
        int     *sg_pte;                /* points to NPTESG PTEs */
        pmeg_t  sg_pmeg;                /* the MMU segment number (4c) */
        u_char  sg_npte;                /* number of valid PTEs per seg */
+       int8_t  sg_nwired;              /* number of wired pages */
 };
 
 typedef struct pmap *pmap_t;
diff -r 79b5c24b9421 -r 7e176a17a032 sys/arch/sparc/sparc/pmap.c
--- a/sys/arch/sparc/sparc/pmap.c       Sat Aug 16 18:42:53 2003 +0000
+++ b/sys/arch/sparc/sparc/pmap.c       Sat Aug 16 19:21:21 2003 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.266 2003/08/14 11:00:02 hannken Exp $ */
+/*     $NetBSD: pmap.c,v 1.267 2003/08/16 19:21:21 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.266 2003/08/14 11:00:02 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.267 2003/08/16 19:21:21 pk Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -864,8 +864,8 @@
        int pte;
 {
        struct pmap *pm;
-       struct regmap *rm;
-       struct segmap *sm;
+       struct regmap *rp;
+       struct segmap *sp;
 
        if (getcontext4m() != 0)
                panic("setpte4m: user context");
@@ -877,20 +877,18 @@
        if (pm->pm_regmap == NULL)
                panic("setpte4m: no regmap entry");
 #endif
-       rm = &pm->pm_regmap[VA_VREG(va)];
-       sm = &rm->rg_segmap[VA_VSEG(va)];
+       rp = &pm->pm_regmap[VA_VREG(va)];
+       sp = &rp->rg_segmap[VA_VSEG(va)];
 
 #ifdef DEBUG
-       if (rm->rg_segmap == NULL)
-               panic("setpte4m: no segmap for va %p (rp=%p)",
-                       (caddr_t)va, (caddr_t)rm);
-
-       if (sm->sg_pte == NULL)
-               panic("setpte4m: no pte for va %p (rp=%p, sp=%p)",
-                     (caddr_t)va, rm, sm);
+       if (rp->rg_segmap == NULL)
+               panic("setpte4m: no segmap for va %lx (rp=%p)", va, rp);
+
+       if (sp->sg_pte == NULL)
+               panic("setpte4m: no pte for va %lx (rp=%p,sp=%p)", va, rp, sp);
 #endif
        tlb_flush_page(va, 0, CPUSET_ALL);
-       setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
+       setpgt4m(sp->sg_pte + VA_SUN4M_VPG(va), pte);
 }
 
 /*
@@ -1551,10 +1549,12 @@
 /*
  * MMU management.
  */
-int    me_alloc(struct mmuq *, struct pmap *, int, int);
+static int     me_alloc(struct mmuq *, struct pmap *, int, int);
 static void    me_free(struct pmap *, u_int);
-int    region_alloc(struct mmuq *, struct pmap *, int);
-void   region_free(struct pmap *, u_int);
+#if defined(SUN4_MMU3L)
+static int     region_alloc(struct mmuq *, struct pmap *, int);
+static void    region_free(struct pmap *, u_int);
+#endif
 
 
 /*
@@ -1571,7 +1571,7 @@
  * since it implements the dynamic allocation of MMU entries.
  */
 
-int
+static __inline__ int
 me_alloc(mh, newpm, newvreg, newvseg)
        struct mmuq *mh;
        struct pmap *newpm;
@@ -1579,7 +1579,7 @@
 {
        struct mmuentry *me;
        struct pmap *pm;
-       int i, va, *pte, tpte;
+       int i, va, *ptep, pte;
        int ctx;
        struct regmap *rp;
        struct segmap *sp;
@@ -1647,7 +1647,7 @@
 
        rp = &pm->pm_regmap[me->me_vreg];
        sp = &rp->rg_segmap[me->me_vseg];
-       pte = sp->sg_pte;
+       ptep = sp->sg_pte;
 
 #ifdef DEBUG
        if (sp->sg_pmeg != me->me_cookie)
@@ -1712,13 +1712,14 @@
         */
        i = NPTESG;
        do {
-               tpte = getpte4(va);
-               if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
+               int swbits = *ptep & PG_MBZ;
+               pte = getpte4(va);
+               if ((pte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
                        struct vm_page *pg;
-                       if ((pg = pvhead4_4c(tpte)) != NULL)
-                               VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(tpte);
+                       if ((pg = pvhead4_4c(pte)) != NULL)
+                               VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(pte);
                }
-               *pte++ = tpte & ~(PG_U|PG_M);
+               *ptep++ = swbits | (pte & ~(PG_U|PG_M));
                va += NBPG;
        } while (--i > 0);
 
@@ -1977,7 +1978,7 @@
 }
 #endif /* SUN4_MMU3L */
 
-static __inline__ void
+static void
 mmu_pmeg_lock(int pmeg)
 {
        struct mmuentry *me = &mmusegments[pmeg];
@@ -1985,7 +1986,15 @@
        MMUQ_INSERT_TAIL(&segm_locked, me, me_list);
 }
 
-static __inline__ void
+static void
+mmu_pmeg_unlock(int pmeg)
+{
+       struct mmuentry *me = &mmusegments[pmeg];
+       MMUQ_REMOVE(me, me_list);
+       MMUQ_INSERT_TAIL(&segm_lru, me, me_list);
+}
+
+static void
 mmu_pagein_seg(struct pmap *pm, struct segmap *sp, vaddr_t va,
                int vr, int vs, struct mmuq *mh)
 {
@@ -2011,7 +2020,7 @@
        pte = sp->sg_pte;
        i = NPTESG;
        do {
-               setpte4(va, *pte++);
+               setpte4(va, *pte++ & ~PG_MBZ);
                va += NBPG;
        } while (--i > 0);
        splx(s);
@@ -2320,7 +2329,7 @@
        struct vm_page *pg;
        int bis, bic;
 {
-       int *pte;
+       int pte, *ptep;
        struct pvlist *pv;
        struct pmap *pm;
        int va, vr, vs;
@@ -2344,20 +2353,17 @@
                vs = VA_VSEG(va);
                rp = &pm->pm_regmap[vr];
                sp = &rp->rg_segmap[vs];
-               pte = sp->sg_pte;
+               ptep = &sp->sg_pte[VA_VPG(va)];
 
                if (sp->sg_pmeg == seginval) {
                        /* not in hardware: just fix software copy */
-                       pte += VA_VPG(va);
-                       *pte = (*pte | bis) & ~bic;
+                       *ptep = (*ptep | bis) & ~bic;
                } else {
-                       int tpte;
-
                        /* in hardware: fix hardware copy */
                        if (CTX_USABLE(pm,rp)) {
                                setcontext4(pm->pm_ctxnum);
                                /* XXX should flush only when necessary */
-                               tpte = getpte4(va);
+                               pte = getpte4(va);
                                /*
                                 * XXX: always flush cache; conservative, but
                                 * needed to invalidate cache tag protection
@@ -2371,14 +2377,13 @@
                                        setregmap(0, tregion);
                                setsegmap(0, sp->sg_pmeg);
                                va = VA_VPG(va) << PGSHIFT;
-                               tpte = getpte4(va);
+                               pte = getpte4(va);
                        }
-                       if (tpte & PG_V)
-                               VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(tpte);
-                       tpte = (tpte | bis) & ~bic;
-                       setpte4(va, tpte);
-                       if (pte != NULL)        /* update software copy */
-                               pte[VA_VPG(va)] = tpte;
+                       if (pte & PG_V)
+                               VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(pte);
+                       pte = (pte | bis) & ~bic;
+                       setpte4(va, pte);
+                       *ptep = (*ptep & PG_MBZ) | pte;
                }
        }
        setcontext4(ctx);
@@ -2398,7 +2403,7 @@
 {
        struct pvlist *pv;
        struct pmap *pm;
-       int tpte, va, vr, vs, pmeg, flags;
+       int pte, va, vr, vs, pmeg, flags;
        int ctx, s;
        struct regmap *rp;
        struct segmap *sp;
@@ -2424,8 +2429,8 @@
                if (CTX_USABLE(pm,rp)) {
                        setcontext4(pm->pm_ctxnum);
                        /* XXX should flush only when necessary */
-                       tpte = getpte4(va);
-                       if (tpte & PG_M)
+                       pte = getpte4(va);
+                       if (pte & PG_M)
                                cache_flush_page(va, pm->pm_ctxnum);
                } else {
                        /* Make temp map in ctx 0 to access the PTE */
@@ -2434,12 +2439,12 @@
                                setregmap(0, tregion);
                        setsegmap(0, pmeg);
                        va = VA_VPG(va) << PGSHIFT;
-                       tpte = getpte4(va);
+                       pte = getpte4(va);
                }
-               if (tpte & (PG_M|PG_U) && tpte & PG_V) {
-                       flags |= MR4_4C(tpte);
-                       tpte &= ~(PG_M|PG_U);
-                       setpte4(va, tpte);
+               if (pte & (PG_M|PG_U) && pte & PG_V) {
+                       flags |= MR4_4C(pte);
+                       pte &= ~(PG_M|PG_U);
+                       setpte4(va, pte);
                }
        }
 
@@ -3101,7 +3106,8 @@
 #if defined(SUN4_MMU3L)
        struct mmuentry *mmureg;
 #endif
-       struct   regmap *rp;
+       struct regmap *rp;
+       struct segmap *sp;
        int i, j;
        int npte, zseg, vr, vs;
        int startscookie, scookie;
@@ -3238,9 +3244,9 @@
                kernel_regmap_store[i].rg_segmap =
                        &kernel_segmap_store[i * NSEGRG];
                for (j = NSEGRG; --j >= 0;) {
-                       kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
-                       kernel_segmap_store[i * NSEGRG + j].sg_pte =
-                               &kptes[(i * NSEGRG + j) * NPTESG];
+                       sp = &kernel_segmap_store[i * NSEGRG + j];
+                       sp->sg_pmeg = seginval;
+                       sp->sg_pte = &kptes[(i * NSEGRG + j) * NPTESG];
                }
        }
 
@@ -3391,13 +3397,14 @@
                mmuseg->me_pmap = pmap_kernel();
                mmuseg->me_vreg = vr;
                mmuseg->me_vseg = vs % NSEGRG;
-               rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
+               sp = &rp->rg_segmap[vs % NSEGRG];
+               sp->sg_pmeg = scookie;
                npte = ++scookie < zseg ? NPTESG : lastpage;
-               rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
+               sp->sg_npte = npte;
+               sp->sg_nwired = npte;
                rp->rg_nsegmap += 1;
