Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/uebayasi-xip]: src/sys/arch/hppa Use VM_PAGE_TO_MD(). Only compile tested.



details:   https://anonhg.NetBSD.org/src/rev/67bcd96609b3
branches:  uebayasi-xip
changeset: 751602:67bcd96609b3
user:      uebayasi <uebayasi%NetBSD.org@localhost>
date:      Thu Feb 25 04:11:29 2010 +0000

description:
Use VM_PAGE_TO_MD().  Only compile tested.

diffstat:

 sys/arch/hppa/hppa/pmap.c    |  153 ++++++++++++++++++++++++------------------
 sys/arch/hppa/include/pmap.h |    6 +-
 2 files changed, 90 insertions(+), 69 deletions(-)

diffs (truncated from 467 to 300 lines):

diff -r 6ef76214da07 -r 67bcd96609b3 sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Thu Feb 25 03:44:16 2010 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Thu Feb 25 04:11:29 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.63 2009/12/18 19:20:35 skrll Exp $  */
+/*     $NetBSD: pmap.c,v 1.63.2.1 2010/02/25 04:11:29 uebayasi Exp $   */
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,10 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.63 2009/12/18 19:20:35 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.63.2.1 2010/02/25 04:11:29 uebayasi Exp $");
+
+#include "opt_device_page.h"
+#include "opt_xip.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -486,16 +489,18 @@
 pmap_dump_pv(paddr_t pa)
 {
        struct vm_page *pg;
+       struct vm_page_md *md;
        struct pv_entry *pve;
 
        pg = PHYS_TO_VM_PAGE(pa);
-       mutex_enter(&pg->mdpage.pvh_lock);
-       printf("pg %p attr 0x%08x aliases %d\n", pg, pg->mdpage.pvh_attrs,
-           pg->mdpage.pvh_aliases);
-       for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
+       md = VM_PAGE_TO_MD(pg);
+       mutex_enter(&md->pvh_lock);
+       printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
+           md->pvh_aliases);
+       for (pve = md->pvh_list; pve; pve = pve->pv_next)
                printf("%x:%lx\n", pve->pv_pmap->pm_space,
                    pve->pv_va & PV_VAMASK);
-       mutex_exit(&pg->mdpage.pvh_lock);
+       mutex_exit(&md->pvh_lock);
 }
 #endif
 
@@ -507,12 +512,13 @@
  *
  * - Shouldn't be called for pages that have been marked uncacheable by
  *   pmap_kenter_pa.
- * - Must be called with pg->mdpage.pvh_lock held.
+ * - Must be called with md->pvh_lock held.
  */
 void
 pmap_check_alias(struct vm_page *pg, struct pv_entry *pve, vaddr_t va,
     pt_entry_t *ptep)
 {
+       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        bool nonequiv = false;
        struct pv_entry *tpve;
        u_int attrs;
@@ -521,8 +527,8 @@
            ("%s(%p, %p, 0x%lx, %p)\n", __func__, pg, pve, va, ptep));
 
        /* we should only be looking if we're not PVF_NC */
-       KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
-       KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+       KASSERT((md->pvh_attrs & PVF_NC) == 0);
+       KASSERT(mutex_owned(&md->pvh_lock));
 
        if (ptep) {
                attrs = pmap_pvh_attrs(*ptep);
@@ -578,7 +584,7 @@
                         * mark all mappings as uncacheable (if they're not
                         * already marked as such).
                         */
-                       pg->mdpage.pvh_aliases++;
+                       md->pvh_aliases++;
 
                        if ((attrs & PVF_UNCACHEABLE) == 0)
                                __changebit(pg, PVF_UNCACHEABLE, 0);
@@ -595,8 +601,8 @@
                         * it cacheable if all non-equiv aliases are gone.
                         */
 
-                       pg->mdpage.pvh_aliases--;
-                       if (pg->mdpage.pvh_aliases == 0) {
+                       md->pvh_aliases--;
+                       if (md->pvh_aliases == 0) {
                                __changebit(pg, 0, PVF_UNCACHEABLE);
 
                                DPRINTF(PDB_FOLLOW|PDB_ALIAS,
@@ -639,26 +645,29 @@
 pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
     vaddr_t va, struct vm_page *pdep, u_int flags)
 {
+       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
            __func__, pg, pve, pm, va, pdep, flags));
 
-       KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+       KASSERT(mutex_owned(&md->pvh_lock));
 
        pve->pv_pmap = pm;
        pve->pv_va = va | flags;
        pve->pv_ptp = pdep;
-       pve->pv_next = pg->mdpage.pvh_list;
-       pg->mdpage.pvh_list = pve;
+       pve->pv_next = md->pvh_list;
+       md->pvh_list = pve;
 }
 
 static inline struct pv_entry *
 pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
 {
+       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        struct pv_entry **pve, *pv;
 
-       KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+       KASSERT(mutex_owned(&md->pvh_lock));
 
-       for (pv = *(pve = &pg->mdpage.pvh_list);
+       for (pv = *(pve = &md->pvh_list);
            pv; pv = *(pve = &(*pve)->pv_next))
                if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
                        *pve = pv->pv_next;
@@ -1182,7 +1191,8 @@
                                continue;
 
                        sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
-                       for (haggis = sheep->mdpage.pvh_list; haggis != NULL; )
+                       struct vm_page_md * const md = VM_PAGE_TO_MD(sheep);
+                       for (haggis = md->pvh_list; haggis != NULL; )
                                if (haggis->pv_pmap == pmap) {
 
                                        DPRINTF(PDB_FOLLOW, (" 0x%lx",
@@ -1196,7 +1206,7 @@
                                         * exploit the sacred knowledge of
                                         * lambeous ozzmosis
                                         */
-                                       haggis = sheep->mdpage.pvh_list;
+                                       haggis = md->pvh_list;
                                } else
                                        haggis = haggis->pv_next;
                }
@@ -1279,10 +1289,11 @@
                }
 
                pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
-               mutex_enter(&pg->mdpage.pvh_lock);
+               struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+               mutex_enter(&md->pvh_lock);
                pve = pmap_pv_remove(pg, pmap, va);
-               pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
-               mutex_exit(&pg->mdpage.pvh_lock);
+               md->pvh_attrs |= pmap_pvh_attrs(pte);
+               mutex_exit(&md->pvh_lock);
        } else {
                DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n",
                    __func__, va, pa));
@@ -1296,11 +1307,12 @@
        }
 
        if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
-               mutex_enter(&pg->mdpage.pvh_lock);
+               struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+               mutex_enter(&md->pvh_lock);
 
                if (!pve && !(pve = pmap_pv_alloc())) {
                        if (flags & PMAP_CANFAIL) {
-                               mutex_exit(&pg->mdpage.pvh_lock);
+                               mutex_exit(&md->pvh_lock);
                                PMAP_UNLOCK(pmap);
                                return (ENOMEM);
                        }
@@ -1309,7 +1321,7 @@
                pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
                pmap_check_alias(pg, pve, va, &pte);
 
-               mutex_exit(&pg->mdpage.pvh_lock);
+               mutex_exit(&md->pvh_lock);
        } else if (pve) {
                pmap_pv_free(pve);
        }
@@ -1378,17 +1390,18 @@
 
                        if (pmap_initialized &&
                            (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
+                               struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-                               mutex_enter(&pg->mdpage.pvh_lock);
+                               mutex_enter(&md->pvh_lock);
 
                                pve = pmap_pv_remove(pg, pmap, sva);
 
-                               pmap_check_alias(pg, pg->mdpage.pvh_list,
+                               pmap_check_alias(pg, md->pvh_list,
                                    sva, NULL);
 
-                               pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+                               md->pvh_attrs |= pmap_pvh_attrs(pte);
 
-                               mutex_exit(&pg->mdpage.pvh_lock);
+                               mutex_exit(&md->pvh_lock);
 
                                if (pve != NULL)
                                        pmap_pv_free(pve);
@@ -1438,9 +1451,10 @@
                                continue;
 
                        pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
-                       mutex_enter(&pg->mdpage.pvh_lock);
-                       pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
-                       mutex_exit(&pg->mdpage.pvh_lock);
+                       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+                       mutex_enter(&md->pvh_lock);
+                       md->pvh_attrs |= pmap_pvh_attrs(pte);
+                       mutex_exit(&md->pvh_lock);
 
                        pmap_pte_flush(pmap, sva, pte);
                        pte &= ~PTE_PROT(TLB_AR_MASK);
@@ -1455,16 +1469,17 @@
 void
 pmap_page_remove(struct vm_page *pg)
 {
+       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        struct pv_entry *pve, *npve, **pvp;
 
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
 
-       if (pg->mdpage.pvh_list == NULL)
+       if (md->pvh_list == NULL)
                return;
 
-       mutex_enter(&pg->mdpage.pvh_lock);
-       pvp = &pg->mdpage.pvh_list;
-       for (pve = pg->mdpage.pvh_list; pve; pve = npve) {
+       mutex_enter(&md->pvh_lock);
+       pvp = &md->pvh_list;
+       for (pve = md->pvh_list; pve; pve = npve) {
                pmap_t pmap = pve->pv_pmap;
                vaddr_t va = pve->pv_va & PV_VAMASK;
                volatile pt_entry_t *pde;
@@ -1486,7 +1501,7 @@
                        continue;
                }
 
-               pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+               md->pvh_attrs |= pmap_pvh_attrs(pte);
 
                pmap_pte_flush(pmap, va, pte);
                if (pte & PTE_PROT(TLB_WIRED))
@@ -1498,7 +1513,7 @@
                PMAP_UNLOCK(pmap);
        }
        *pvp = NULL;
-       mutex_exit(&pg->mdpage.pvh_lock);
+       mutex_exit(&md->pvh_lock);
 
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__));
 }
@@ -1541,36 +1556,38 @@
 bool
 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
 {
+       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        bool rv;
 
        DPRINTF(PDB_FOLLOW|PDB_BITS, 
            ("%s(%p, %x, %x)\n", __func__, pg, set, clear));
 
-       mutex_enter(&pg->mdpage.pvh_lock);
+       mutex_enter(&md->pvh_lock);
        rv = __changebit(pg, set, clear);
-       mutex_exit(&pg->mdpage.pvh_lock);
+       mutex_exit(&md->pvh_lock);
 
        return rv;
 }
 
 /*
- * Must be called with pg->mdpage.pvh_lock held.
+ * Must be called with md->pvh_lock held.
  */
 static bool
 __changebit(struct vm_page *pg, u_int set, u_int clear)
 {
+       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        struct pv_entry *pve;
        int res;
 
-       KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+       KASSERT(mutex_owned(&md->pvh_lock));
        KASSERT(((set | clear) &
            ~(PVF_MOD|PVF_REF|PVF_UNCACHEABLE|PVF_WRITE)) == 0);
 
        /* preserve other bits */
-       res = pg->mdpage.pvh_attrs & (set | clear);
-       pg->mdpage.pvh_attrs ^= res;
+       res = md->pvh_attrs & (set | clear);



Home | Main Index | Thread Index | Old Index