Source-Changes-HG archive


[src/trunk]: src/sys/arch/hppa Not all PA CPUs have the U-bit (uncacheable) f...



details:   https://anonhg.NetBSD.org/src/rev/201ce19a40f0
branches:  trunk
changeset: 753168:201ce19a40f0
user:      skrll <skrll%NetBSD.org@localhost>
date:      Fri Mar 19 07:29:44 2010 +0000

description:
Not all PA CPUs have the U-bit (uncacheable) for non-IO memory.  In fact
most don't.  Deal with non-equivalent aliases by removing and flushing the
managed mappings, and flushing the unmanaged mappings.
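
In code terms this boils down to the following (a simplified sketch of the
pmap_enter() hunk further down; locking and names as in that hunk, pv-entry
allocation and error handling omitted):

	pte |= PTE_PROT(pmap_prot(pmap, prot));
	mutex_enter(&pg->mdpage.pvh_lock);
	/* Any writable, non-equivalent alias of this page? */
	if (pmap_check_alias(pg, va, pte))
		/* Yes: remove and flush the existing managed mappings. */
		pmap_page_remove_locked(pg);
	pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
	mutex_exit(&pg->mdpage.pvh_lock);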

When flushing caches/TLB, flush the cache before purging the TLB, just in
case the cache flush itself enters the mapping into the TLB.
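
The new ordering in pmap_pte_flush() (taken from the hunk below) is roughly:

	/* Flush the caches first ... */
	fdcache(pmap->pm_space, va, PAGE_SIZE);
	if (pte & PTE_PROT(TLB_EXECUTE)) {
		ficache(pmap->pm_space, va, PAGE_SIZE);
		pitlb(pmap->pm_space, va);
	}
	/* ... and only then purge the data TLB entry, in case the cache
	 * flush entered the translation into the TLB. */
	pdtlb(pmap->pm_space, va);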

diffstat:

 sys/arch/hppa/hppa/pmap.c    |  241 +++++++++++-------------------------------
 sys/arch/hppa/include/pmap.h |   31 +----
 2 files changed, 70 insertions(+), 202 deletions(-)

diffs (truncated from 468 to 300 lines):

diff -r 4506f65bcaba -r 201ce19a40f0 sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Fri Mar 19 04:19:28 2010 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Fri Mar 19 07:29:44 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.72 2010/03/16 16:20:19 skrll Exp $  */
+/*     $NetBSD: pmap.c,v 1.73 2010/03/19 07:29:44 skrll Exp $  */
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.72 2010/03/16 16:20:19 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.73 2010/03/19 07:29:44 skrll Exp $");
 
 #include "opt_cputype.h"
 
@@ -225,13 +225,12 @@
 void pmap_dump_pv(paddr_t);
 #endif
 
-void pmap_check_alias(struct vm_page *, struct pv_entry *, vaddr_t,
-    pt_entry_t *);
-static bool __changebit(struct vm_page *, u_int, u_int);
+void pmap_page_remove_locked(struct vm_page *);
+int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t);
 
 /* un-invert PVF_REF */
 #define pmap_pvh_attrs(a) \
-       (((a) & (PVF_MOD|PVF_REF|PVF_WRITE|PVF_UNCACHEABLE)) ^ PVF_REF)
+       (((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
 
 #define PMAP_LOCK(pm)                                  \
        do {                                            \
@@ -426,11 +425,11 @@
 {
 
        fdcache(pmap->pm_space, va, PAGE_SIZE);
-       pdtlb(pmap->pm_space, va);
        if (pte & PTE_PROT(TLB_EXECUTE)) {
                ficache(pmap->pm_space, va, PAGE_SIZE);
                pitlb(pmap->pm_space, va);
        }
+       pdtlb(pmap->pm_space, va);
 #ifdef USE_HPT
        if (pmap_hpt) {
                struct hpt_entry *hpt;
@@ -511,112 +510,28 @@
 }
 #endif
 
-/*
- * Check for non-equiv aliases for this page and the mapping being added or
- * removed. If, when adding, we find a new non-equiv alias then mark all PTEs
- * as uncacheable including the one we're checking. If, when removing, there
- * are no non-equiv aliases left then we mark PTEs as cacheable.
- *
- * - Shouldn't be called for pages that have been marked uncacheable by
- *   pmap_kenter_pa.
- * - Must be called with pg->mdpage.pvh_lock held.
- */
-void
-pmap_check_alias(struct vm_page *pg, struct pv_entry *pve, vaddr_t va,
-    pt_entry_t *ptep)
+int
+pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
 {
-       bool nonequiv = false;
-       struct pv_entry *tpve;
-       u_int attrs;
+       struct pv_entry *pve;
+       int ret = 0;
+
+       /* check for non-equ aliased mappings */
+       for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
+               vaddr_t pva = pve->pv_va & PV_VAMASK;
 
-       DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-           ("%s(%p, %p, 0x%lx, %p)\n", __func__, pg, pve, va, ptep));
-
-       /* we should only be looking if we're not PVF_NC */
-       KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
-       KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+               pte |= pmap_vp_find(pve->pv_pmap, pva);
+               if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
+                   (pte & PTE_PROT(TLB_WRITE))) {
 
-       if (ptep) {
-               attrs = pmap_pvh_attrs(*ptep);
-
-               DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-                   ("%s: va 0x%08lx attrs 0x%08x (new)\n", __func__, va,
-                   attrs));
-       } else {
-               attrs = 0;
-
-               DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-                   ("%s: va 0x%08lx (removed)\n", __func__, va));
+                       DPRINTF(PDB_FOLLOW|PDB_ALIAS,
+                            ("%s: aliased writable mapping 0x%x:0x%x\n",
+                            __func__, pve->pv_pmap->pm_space, pve->pv_va));
+                       ret++;
+               }
        }
 
-       /*
-        * Add in flags for existing mappings and check if mapping we're
-        * adding/removing is an non-equiv aliases of the other mappings.
-        */
-       for (tpve = pve; tpve; tpve = tpve->pv_next) {
-               pt_entry_t pte;
-               vaddr_t tva = tpve->pv_va & PV_VAMASK;
-
-               /* XXX LOCK */
-               pte = pmap_vp_find(tpve->pv_pmap, tva);
-               attrs |= pmap_pvh_attrs(pte);
-
-               if (((va ^ tva) & HPPA_PGAOFF) != 0)
-                       nonequiv = true;
-
-               DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-                   ("%s: va 0x%08x:0x%08lx attrs 0x%08x %s\n", __func__,
-                   tpve->pv_pmap->pm_space, tpve->pv_va & PV_VAMASK,
-                   pmap_pvh_attrs(pte), nonequiv ? "alias" : ""));
-       }
-
-       if (!nonequiv) {
-               /*
-                * Inherit uncacheable attribute if set as it means we already
-                * have non-equiv aliases.
-                */
-               if (ptep && (attrs & PVF_UNCACHEABLE) != 0)
-                       *ptep |= PTE_PROT(TLB_UNCACHEABLE);
-
-               /* No more to be done. */
-               return;
-       }
-
-       if (ptep) {
-               if ((attrs & (PVF_WRITE|PVF_MOD)) != 0) {
-                       /*
-                        * We have non-equiv aliases and the new/some 
-                        * mapping(s) is/are writable (or modified). We must
-                        * mark all mappings as uncacheable (if they're not
-                        * already marked as such).
-                        */
-                       pg->mdpage.pvh_aliases++;
-
-                       if ((attrs & PVF_UNCACHEABLE) == 0)
-                               __changebit(pg, PVF_UNCACHEABLE, 0);
-
-                       *ptep |= PTE_PROT(TLB_UNCACHEABLE);
-
-                       DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-                           ("%s: page marked uncacheable\n", __func__));
-               }
-       } else {
-               if ((attrs & PVF_UNCACHEABLE) != 0) {
-                       /*
-                        * We've removed a non-equiv aliases. We can now mark
-                        * it cacheable if all non-equiv aliases are gone.
-                        */
-
-                       pg->mdpage.pvh_aliases--;
-                       if (pg->mdpage.pvh_aliases == 0) {
-                               __changebit(pg, 0, PVF_UNCACHEABLE);
-
-                               DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-                                   ("%s: page re-marked cacheable\n",
-                                   __func__));
-                       }
-               }
-       }
+        return (ret);
 }
 
 /*
@@ -1308,7 +1223,6 @@
        }
 
        if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
-               mutex_enter(&pg->mdpage.pvh_lock);
 
                if (!pve && !(pve = pmap_pv_alloc())) {
                        if (flags & PMAP_CANFAIL) {
@@ -1318,9 +1232,11 @@
                        }
                        panic("%s: no pv entries available", __func__);
                }
+                pte |= PTE_PROT(pmap_prot(pmap, prot));
+               mutex_enter(&pg->mdpage.pvh_lock);
+               if (pmap_check_alias(pg, va, pte))
+                       pmap_page_remove_locked(pg);
                pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
-               pmap_check_alias(pg, pve, va, &pte);
-
                mutex_exit(&pg->mdpage.pvh_lock);
        } else if (pve) {
                pmap_pv_free(pve);
@@ -1394,10 +1310,6 @@
                                mutex_enter(&pg->mdpage.pvh_lock);
 
                                pve = pmap_pv_remove(pg, pmap, sva);
-
-                               pmap_check_alias(pg, pg->mdpage.pvh_list,
-                                   sva, NULL);
-
                                pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
 
                                mutex_exit(&pg->mdpage.pvh_lock);
@@ -1467,6 +1379,15 @@
 void
 pmap_page_remove(struct vm_page *pg)
 {
+
+       mutex_enter(&pg->mdpage.pvh_lock);
+       pmap_page_remove_locked(pg);
+       mutex_exit(&pg->mdpage.pvh_lock);
+}
+
+void
+pmap_page_remove_locked(struct vm_page *pg)
+{
        struct pv_entry *pve, *npve, **pvp;
 
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
@@ -1474,7 +1395,6 @@
        if (pg->mdpage.pvh_list == NULL)
                return;
 
-       mutex_enter(&pg->mdpage.pvh_lock);
        pvp = &pg->mdpage.pvh_list;
        for (pve = pg->mdpage.pvh_list; pve; pve = npve) {
                pmap_t pmap = pve->pv_pmap;
@@ -1495,22 +1415,21 @@
                if (pve->pv_va & PV_KENTER) {
                        *pvp = pve;
                        pvp = &pve->pv_next;
-                       continue;
-               }
-
-               pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+               } else
+                       pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
 
                pmap_pte_flush(pmap, va, pte);
                if (pte & PTE_PROT(TLB_WIRED))
                        pmap->pm_stats.wired_count--;
                pmap->pm_stats.resident_count--;
 
-               pmap_pte_set(pde, va, 0);
-               pmap_pv_free(pve);
+               if (!(pve->pv_va & PV_KENTER)) {
+                       pmap_pte_set(pde, va, 0);
+                       pmap_pv_free(pve);
+               }
                PMAP_UNLOCK(pmap);
        }
        *pvp = NULL;
-       mutex_exit(&pg->mdpage.pvh_lock);
 
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__));
 }
@@ -1553,30 +1472,16 @@
 bool
 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
 {
-       bool rv;
+       struct pv_entry *pve;
+       int res;
 
        DPRINTF(PDB_FOLLOW|PDB_BITS, 
            ("%s(%p, %x, %x)\n", __func__, pg, set, clear));
 
-       mutex_enter(&pg->mdpage.pvh_lock);
-       rv = __changebit(pg, set, clear);
-       mutex_exit(&pg->mdpage.pvh_lock);
-
-       return rv;
-}
+       KASSERT((set & ~(PVF_REF)) == 0);
+       KASSERT((clear & ~(PVF_MOD|PVF_WRITE)) == 0);
 
-/*
- * Must be called with pg->mdpage.pvh_lock held.
- */
-static bool
-__changebit(struct vm_page *pg, u_int set, u_int clear)
-{
-       struct pv_entry *pve;
-       int res;
-
-       KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
-       KASSERT(((set | clear) &
-           ~(PVF_MOD|PVF_REF|PVF_UNCACHEABLE|PVF_WRITE)) == 0);
+       mutex_enter(&pg->mdpage.pvh_lock);
 
        /* preserve other bits */
        res = pg->mdpage.pvh_attrs & (set | clear);


