Source-Changes-HG archive


[src/trunk]: src/sys/arch/sparc/sparc Finish PV table locking. Terminology and locking strategy stolen from the alpha port.



details:   https://anonhg.NetBSD.org/src/rev/b84c6bc85cd2
branches:  trunk
changeset: 543226:b84c6bc85cd2
user:      pk <pk@NetBSD.org>
date:      Wed Feb 19 22:27:08 2003 +0000

description:
Finish PV table locking. Terminology and locking strategy stolen from
the alpha port.
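
For orientation, a minimal sketch of the two lock orderings this change
establishes. The identifiers (the PMAP_*_LOCK macros, pm_lock, pv_slock,
VM_MDPAGE_PVHEAD) come from the diff below; the two wrapper functions and
their bodies are hypothetical illustrations, not code from the commit.

/* pmap -> PV direction, e.g. an enter-style operation. */
void
example_map_to_head(struct pmap *pm, struct vm_page *pg)
{
	PMAP_MAP_TO_HEAD_LOCK();		/* shared read lock */
	simple_lock(&pm->pm_lock);		/* pmap first ... */
	simple_lock(&pg->mdpage.pv_slock);	/* ... then its PV list */
	/* ... install or modify a mapping ... */
	simple_unlock(&pg->mdpage.pv_slock);
	simple_unlock(&pm->pm_lock);
	PMAP_MAP_TO_HEAD_UNLOCK();
}

/*
 * PV -> pmap direction, e.g. a pmap_page_protect-style operation.
 * The exclusive write lock keeps all pmap->PV threads out, so taking
 * pm_lock after pv_slock here cannot deadlock against them.
 */
void
example_head_to_map(struct vm_page *pg)
{
	struct pvlist *pv;
	int s;

	s = splvm();
	PMAP_HEAD_TO_MAP_LOCK();		/* exclusive write lock */
	simple_lock(&pg->mdpage.pv_slock);
	for (pv = VM_MDPAGE_PVHEAD(pg); pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == NULL)	/* embedded head unused: list is empty */
			break;
		simple_lock(&pv->pv_pmap->pm_lock);
		/* ... update this pmap's PTE for pv->pv_va ... */
		simple_unlock(&pv->pv_pmap->pm_lock);
	}
	simple_unlock(&pg->mdpage.pv_slock);
	PMAP_HEAD_TO_MAP_UNLOCK();
	splx(s);
}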

diffstat:

 sys/arch/sparc/sparc/pmap.c |  405 +++++++++++++++++++++++++------------------
 1 files changed, 237 insertions(+), 168 deletions(-)

diffs (truncated from 789 to 300 lines):

diff -r bcb2e3a817b6 -r b84c6bc85cd2 sys/arch/sparc/sparc/pmap.c
--- a/sys/arch/sparc/sparc/pmap.c       Wed Feb 19 19:15:28 2003 +0000
+++ b/sys/arch/sparc/sparc/pmap.c       Wed Feb 19 22:27:08 2003 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.241 2003/02/18 22:05:08 pk Exp $ */
+/*     $NetBSD: pmap.c,v 1.242 2003/02/19 22:27:08 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -166,6 +166,59 @@
 psize_t vm_num_phys;
 
 /*
+ * Locking:
+ *
+ *     This pmap module uses two types of locks: `normal' (sleep)
+ *     locks and `simple' (spin) locks.  They are used as follows:
+ *
+ *     READ/WRITE SPIN LOCKS
+ *     ---------------------
+ *
+ *     * pmap_main_lock - This lock is used to prevent deadlock and/or
+ *       provide mutex access to the pmap module.  Most operations lock
+ *       the pmap first, then PV lists as needed.  However, some operations,
+ *       such as pmap_page_protect(), lock the PV lists before locking
+ *       the pmaps.  To prevent deadlock, we require a mutex lock on the
+ *       pmap module if locking in the PV->pmap direction.  This is
+ *       implemented by acquiring a (shared) read lock on pmap_main_lock
+ *       if locking pmap->PV and an (exclusive) write lock if locking in
+ *       the PV->pmap direction.  Since only one thread can hold a write
+ *       lock at a time, this provides the mutex.
+ *
+ *     SIMPLE LOCKS
+ *     ------------
+ *
+ *     * pm_slock (per-pmap) - This lock protects all of the members
+ *       of the pmap structure itself. Note that in the case of the
+ *       kernel pmap, interrupts which cause memory allocation *must*
+ *       be blocked while this lock is asserted.
+ *
+ *     * pv_slock (per-vm_page) - This lock protects the PV list
+ *       for a specified managed page.
+ *
+ *     All internal functions which operate on a pmap are called
+ *     with the pmap already locked by the caller (which will be
+ *     an interface function).
+ */
+struct lock pmap_main_lock;
+
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+#define        PMAP_MAP_TO_HEAD_LOCK() \
+       spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
+#define        PMAP_MAP_TO_HEAD_UNLOCK() \
+       spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
+#define        PMAP_HEAD_TO_MAP_LOCK() \
+       spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
+#define        PMAP_HEAD_TO_MAP_UNLOCK() \
+       spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
+#else
+#define        PMAP_MAP_TO_HEAD_LOCK()         /* nothing */
+#define        PMAP_MAP_TO_HEAD_UNLOCK()       /* nothing */
+#define        PMAP_HEAD_TO_MAP_LOCK()         /* nothing */
+#define        PMAP_HEAD_TO_MAP_UNLOCK()       /* nothing */
+#endif /* MULTIPROCESSOR || LOCKDEBUG */
+
+/*
  * Flags in pvlist.pv_flags.  Note that PV_MOD must be 1 and PV_REF must be 2
  * since they must line up with the bits in the hardware PTEs (see pte.h).
  * SUN4M bits are at a slightly different location in the PTE.
@@ -461,7 +514,7 @@
                                int, struct vm_page *, int));
 /*static*/ void pv_changepte4m __P((struct vm_page *, int, int));
 /*static*/ int  pv_syncflags4m __P((struct vm_page *));
-/*static*/ int  pv_link4m __P((struct vm_page *, struct pmap *, vaddr_t, int));
+/*static*/ int  pv_link4m __P((struct vm_page *, struct pmap *, vaddr_t, u_int *));
 /*static*/ void pv_unlink4m __P((struct vm_page *, struct pmap *, vaddr_t));
 #endif
 
@@ -2425,10 +2478,12 @@
        pv = VM_MDPAGE_PVHEAD(pg);
 
        s = splvm();                    /* paranoid? */
+       PMAP_HEAD_TO_MAP_LOCK();
+       simple_lock(&pg->mdpage.pv_slock);
        if (pv->pv_pmap == NULL) {
-               splx(s);
-               return;
-       }
+               goto out;
+       }
+
        for (; pv != NULL; pv = pv->pv_next) {
                int tpte;
                pm = pv->pv_pmap;
@@ -2457,6 +2512,10 @@
                    &sp->sg_pte[VA_SUN4M_VPG(va)], bic, bis, pm->pm_ctxnum,
                    PMAP_CPUSET(pm)));
        }
+
+out:
+       simple_unlock(&pg->mdpage.pv_slock);
+       PMAP_HEAD_TO_MAP_UNLOCK();
        splx(s);
 }
 
@@ -2481,21 +2540,24 @@
        pv = VM_MDPAGE_PVHEAD(pg);
 
        s = splvm();                    /* paranoid? */
+       PMAP_HEAD_TO_MAP_LOCK();
        if (pv->pv_pmap == NULL) {      /* paranoid */
-               splx(s);
-               return (0);
+               flags = 0;
+               goto out;
        }
 
        simple_lock(&pg->mdpage.pv_slock);
        flags = pv->pv_flags;
        for (; pv != NULL; pv = pv->pv_next) {
                pm = pv->pv_pmap;
+               simple_lock(&pm->pm_lock);
                va = pv->pv_va;
                vr = VA_VREG(va);
                vs = VA_VSEG(va);
                rp = &pm->pm_regmap[vr];
                sp = &rp->rg_segmap[vs];
                if (sp->sg_pte == NULL) {
+                       simple_unlock(&pm->pm_lock);
                        continue;
                }
 
@@ -2535,9 +2597,13 @@
                                    PMAP_CPUSET(pm));
                        }
                }
-       }
+               simple_unlock(&pm->pm_lock);
+       }
+
        VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
        simple_unlock(&pg->mdpage.pv_slock);
+out:
+       PMAP_HEAD_TO_MAP_UNLOCK();
        splx(s);
        return (flags);
 }
@@ -2582,13 +2648,16 @@
                         */
                        pv0->pv_pmap = NULL;
                        pv0->pv_flags &= ~(PV_NC|PV_ANC);
-                       simple_unlock(&pg->mdpage.pv_slock);
-                       return;
+                       goto out;
                }
        } else {
                struct pvlist *prev;
 
                for (prev = pv0;; prev = npv, npv = npv->pv_next) {
+                       if (npv == NULL) {
+                               printf("pm %p is missing ", pm);
+                               goto out;
+                       }
                        pmap_stats.ps_unlink_pvsearch++;
                        if (npv->pv_pmap == pm && npv->pv_va == va)
                                break;
@@ -2596,7 +2665,6 @@
                prev->pv_next = npv->pv_next;
                pool_put(&pv_pool, npv);
        }
-       simple_unlock(&pg->mdpage.pv_slock);
 
        if ((pv0->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) {
 
@@ -2607,7 +2675,7 @@
                for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next)
                        if (BADALIAS(va, npv->pv_va) ||
                            (npv->pv_flags & PV_NC) != 0)
-                               return;
+                               goto out;
 #ifdef DEBUG
                if (pmapdebug & PDB_CACHESTUFF)
                        printf(
@@ -2618,25 +2686,28 @@
                pv0->pv_flags &= ~PV_ANC;
                pv_changepte4m(pg, SRMMU_PG_C, 0);
        }
+
+out:
+       simple_unlock(&pg->mdpage.pv_slock);
 }
 
 /*
  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
- * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
- * be cached (i.e. its results must be (& ~)'d in.
+ * May turn off the cacheable bit in the pte prototype for the new mapping.
+ * Called with pm locked.
  */
 /*static*/ int
-pv_link4m(pg, pm, va, nc)
+pv_link4m(pg, pm, va, pteprotop)
        struct vm_page *pg;
        struct pmap *pm;
        vaddr_t va;
-       int nc;
+       unsigned int *pteprotop;
 {
        struct pvlist *pv0, *npv;
-       int ret;
+       int nc = (*pteprotop & SRMMU_PG_C) == 0 ? PV_NC : 0;
+       int error = 0;
 
        pv0 = VM_MDPAGE_PVHEAD(pg);
-       ret = nc ? SRMMU_PG_C : 0;
        simple_lock(&pg->mdpage.pv_slock);
 
        if (pv0->pv_pmap == NULL) {
@@ -2645,60 +2716,64 @@
                pv0->pv_next = NULL;
                pv0->pv_pmap = pm;
                pv0->pv_va = va;
-               pv0->pv_flags |= nc ? PV_NC : 0;
-               simple_unlock(&pg->mdpage.pv_slock);
-               return (ret);
-       }
-
-       /*
-        * Before entering the new mapping, see if
-        * it will cause old mappings to become aliased
-        * and thus need to be `discached'.
-        */
+               pv0->pv_flags |= nc;
+               goto out;
+       }
 
        pmap_stats.ps_enter_secondpv++;
-       if ((pv0->pv_flags & PV_ANC) != 0) {
-               /* already uncached, just stay that way */
-               ret = SRMMU_PG_C;
-       } else {
-               for (npv = pv0; npv != NULL; npv = npv->pv_next) {
-                       if ((npv->pv_flags & PV_NC) != 0) {
-                               ret = SRMMU_PG_C;
-#ifdef DEBUG
-                               /* Check currently illegal condition */
-                               if (nc == 0)
-                                       printf("pv_link: proc %s, va=0x%lx: "
-                               "unexpected uncached mapping at 0x%lx\n",
-                                           curproc ? curproc->p_comm : "--",
-                                           va, npv->pv_va);
-#endif
-                       }
-                       if (BADALIAS(va, npv->pv_va)) {
-#ifdef DEBUG
-                               if (pmapdebug & PDB_CACHESTUFF)
-                                       printf(
-                       "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n",
-                                       curproc ? curproc->p_comm : "--",
-                                       va, npv->pv_va, pg);
-#endif
-                               /* Mark list head `uncached due to aliases' */
-                               pv0->pv_flags |= PV_ANC;
-                               pv_changepte4m(pg, 0, ret = SRMMU_PG_C);
-                               break;
-                       }
-               }
-       }
 
        npv = pool_get(&pv_pool, PR_NOWAIT);
-       if (npv == NULL)
-               panic("pv_link: pv_pool exhausted");
+       if (npv == NULL) {
+               error = ENOMEM;
+               goto out;
+       }
        npv->pv_next = pv0->pv_next;
        npv->pv_pmap = pm;
        npv->pv_va = va;
-       npv->pv_flags = nc ? PV_NC : 0;
+       npv->pv_flags = nc;
        pv0->pv_next = npv;
+
+       /*
+        * See if the new mapping will cause old mappings to
+        * become aliased and thus need to be `discached'.
+        */
+       if ((pv0->pv_flags & PV_ANC) != 0) {
+               /* already uncached, just stay that way */
+               *pteprotop &= ~SRMMU_PG_C;
+               goto out;
+       }
+
+       for (npv = pv0; npv != NULL; npv = npv->pv_next) {
+               if ((npv->pv_flags & PV_NC) != 0) {
+                       *pteprotop &= ~SRMMU_PG_C;
+#ifdef DEBUG
+                       /* Check currently illegal condition */
+                       if (nc == 0)
+                               printf("pv_link: proc %s, va=0x%lx: "


