Source-Changes-HG archive
[src/uebayasi-xip]: src/sys/arch/arm/arm32 Convert pmap_enter() and pmap_vac_...
details: https://anonhg.NetBSD.org/src/rev/bbb0d526b010
branches: uebayasi-xip
changeset: 751557:bbb0d526b010
user: uebayasi <uebayasi%NetBSD.org@localhost>
date: Wed Feb 10 13:58:08 2010 +0000
description:
Convert pmap_enter() and pmap_vac_me_harder() to take a struct vm_page_md * and a paddr_t instead of a struct vm_page *.
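
[Editor's note: as the hunks below show, the conversion replaces the struct vm_page * argument of these functions with the machine-dependent struct vm_page_md * plus the page's physical address, so the pmap internals no longer depend on a full vm_page, which the XIP branch appears to need for pages not backed by one. A minimal sketch of the resulting call-site idiom, assuming the caller still holds a struct vm_page *; the wrapper name is hypothetical, while VM_PAGE_TO_PHYS() and the mdpage member are the standard NetBSD names:

	/*
	 * Hypothetical illustration only: how a call site that still
	 * has a struct vm_page * passes the new (md, pa) pair.
	 */
	static inline void
	pmap_vac_me_harder_page(struct vm_page *pg, pmap_t pm, vaddr_t va)
	{
		struct vm_page_md *md = &pg->mdpage;	/* MD part of the page */
		paddr_t pa = VM_PAGE_TO_PHYS(pg);	/* its physical address */

		/* was: pmap_vac_me_harder(pg, pm, va) */
		pmap_vac_me_harder(md, pa, pm, va);
	}
]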
diffstat:
sys/arch/arm/arm32/pmap.c | 338 ++++++++++++++++++++++++---------------------
1 files changed, 179 insertions(+), 159 deletions(-)
diffs (truncated from 838 to 300 lines):
diff -r fd1daadd8a4d -r bbb0d526b010 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Wed Feb 10 13:26:22 2010 +0000
+++ b/sys/arch/arm/arm32/pmap.c Wed Feb 10 13:58:08 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.211.2.3 2010/02/10 13:26:22 uebayasi Exp $ */
+/* $NetBSD: pmap.c,v 1.211.2.4 2010/02/10 13:58:08 uebayasi Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -211,7 +211,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.3 2010/02/10 13:26:22 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.4 2010/02/10 13:58:08 uebayasi Exp $");
#ifdef PMAP_DEBUG
@@ -656,10 +656,10 @@
static int pmap_l2ptp_ctor(void *, void *, int);
static int pmap_l2dtable_ctor(void *, void *, int);
-static void pmap_vac_me_harder(struct vm_page *, pmap_t, vaddr_t);
+static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
#ifdef PMAP_CACHE_VIVT
-static void pmap_vac_me_kpmap(struct vm_page *, pmap_t, vaddr_t);
-static void pmap_vac_me_user(struct vm_page *, pmap_t, vaddr_t);
+static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
+static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
#endif
static void pmap_clearbit(struct vm_page *, u_int);
@@ -673,7 +673,7 @@
PMAP_FLUSH_SECONDARY,
PMAP_CLEAN_PRIMARY
};
-static void pmap_flush_page(struct vm_page *, enum pmap_flush_op);
+static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
#endif
static void pmap_page_remove(struct vm_page *);
@@ -1573,50 +1573,50 @@
};
static inline int
-pmap_get_vac_flags(const struct vm_page *pg)
+pmap_get_vac_flags(const struct vm_page_md *md)
{
int kidx, uidx;
kidx = 0;
- if (pg->mdpage.kro_mappings || pg->mdpage.krw_mappings > 1)
+ if (md->kro_mappings || md->krw_mappings > 1)
kidx |= 1;
- if (pg->mdpage.krw_mappings)
+ if (md->krw_mappings)
kidx |= 2;
uidx = 0;
- if (pg->mdpage.uro_mappings || pg->mdpage.urw_mappings > 1)
+ if (md->uro_mappings || md->urw_mappings > 1)
uidx |= 1;
- if (pg->mdpage.urw_mappings)
+ if (md->urw_mappings)
uidx |= 2;
return (pmap_vac_flags[uidx][kidx]);
}
static inline void
-pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
+pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
{
int nattr;
- nattr = pmap_get_vac_flags(pg);
+ nattr = pmap_get_vac_flags(md);
if (nattr < 0) {
- pg->mdpage.pvh_attrs &= ~PVF_NC;
+ md->pvh_attrs &= ~PVF_NC;
return;
}
- if (nattr == 0 && (pg->mdpage.pvh_attrs & PVF_NC) == 0)
+ if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0)
return;
if (pm == pmap_kernel())
- pmap_vac_me_kpmap(pg, pm, va);
+ pmap_vac_me_kpmap(md, pa, pm, va);
else
- pmap_vac_me_user(pg, pm, va);
-
- pg->mdpage.pvh_attrs = (pg->mdpage.pvh_attrs & ~PVF_NC) | nattr;
+ pmap_vac_me_user(md, pa, pm, va);
+
+ md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr;
}
static void
-pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vaddr_t va)
+pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
{
u_int u_cacheable, u_entries;
struct pv_entry *pv;
@@ -1628,19 +1628,19 @@
* kernel-writable pages.
*/
u_cacheable = 0;
- SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+ SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
u_cacheable++;
}
- u_entries = pg->mdpage.urw_mappings + pg->mdpage.uro_mappings;
+ u_entries = md->urw_mappings + md->uro_mappings;
/*
* We know we have just been updating a kernel entry, so if
* all user pages are already cacheable, then there is nothing
* further to do.
*/
- if (pg->mdpage.k_mappings == 0 && u_cacheable == u_entries)
+ if (md->k_mappings == 0 && u_cacheable == u_entries)
return;
if (u_entries) {
@@ -1649,7 +1649,7 @@
* might not be set correctly, call pmap_vac_me_user
* to recalculate the settings.
*/
- SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+ SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
/*
* We know kernel mappings will get set
* correctly in other calls. We also know
@@ -1664,7 +1664,7 @@
* is writable but non-cacheable, then we can
* skip this entry also.
*/
- if (pg->mdpage.k_mappings &&
+ if (md->k_mappings &&
(pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
(PVF_NC | PVF_WRITE))
continue;
@@ -1674,7 +1674,7 @@
* entries and the page is already
* read-only/cacheable.
*/
- if (pg->mdpage.krw_mappings == 0 &&
+ if (md->krw_mappings == 0 &&
(pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
continue;
@@ -1684,18 +1684,18 @@
* can't tell if they are correct or not, so
* we recalculate anyway.
*/
- pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0);
+ pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0);
}
- if (pg->mdpage.k_mappings == 0)
+ if (md->k_mappings == 0)
return;
}
- pmap_vac_me_user(pg, pm, va);
+ pmap_vac_me_user(md, pa, pm, va);
}
static void
-pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
+pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
{
pmap_t kpmap = pmap_kernel();
struct pv_entry *pv, *npv = NULL;
@@ -1713,7 +1713,7 @@
* Keep a pointer to the first one.
*/
npv = NULL;
- SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+ SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
/* Count mappings in the same pmap */
if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
if (entries++ == 0)
@@ -1820,7 +1820,7 @@
#ifdef PMAP_CACHE_VIPT
static void
-pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
+pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
{
struct pv_entry *pv;
vaddr_t tst_mask;
@@ -1828,35 +1828,35 @@
struct l2_bucket *l2b;
pt_entry_t *ptep, pte, opte;
const u_int
- rw_mappings = pg->mdpage.urw_mappings + pg->mdpage.krw_mappings,
- ro_mappings = pg->mdpage.uro_mappings + pg->mdpage.kro_mappings;
+ rw_mappings = md->urw_mappings + md->krw_mappings,
+ ro_mappings = md->uro_mappings + md->kro_mappings;
/* do we need to do anything? */
if (arm_cache_prefer_mask == 0)
return;
- NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: pg=%p, pmap=%p va=%08lx\n",
- pg, pm, va));
+ NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n",
+ md, pm, va));
KASSERT(!va || pm);
- KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+ KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
/* Already a conflict? */
- if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
+ if (__predict_false(md->pvh_attrs & PVF_NC)) {
/* just an add, things are already non-cached */
- KASSERT(!(pg->mdpage.pvh_attrs & PVF_DIRTY));
- KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
+ KASSERT(!(md->pvh_attrs & PVF_DIRTY));
+ KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
bad_alias = false;
if (va) {
PMAPCOUNT(vac_color_none);
bad_alias = true;
- KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+ KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
goto fixup;
}
- pv = SLIST_FIRST(&pg->mdpage.pvh_list);
+ pv = SLIST_FIRST(&md->pvh_list);
/* the list can't be empty because it would be cachable */
- if (pg->mdpage.pvh_attrs & PVF_KMPAGE) {
- tst_mask = pg->mdpage.pvh_attrs;
+ if (md->pvh_attrs & PVF_KMPAGE) {
+ tst_mask = md->pvh_attrs;
} else {
KASSERT(pv);
tst_mask = pv->pv_va;
@@ -1872,9 +1872,9 @@
if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
bad_alias = true;
}
- pg->mdpage.pvh_attrs |= PVF_WRITE;
+ md->pvh_attrs |= PVF_WRITE;
if (!bad_alias)
- pg->mdpage.pvh_attrs |= PVF_DIRTY;
+ md->pvh_attrs |= PVF_DIRTY;
} else {
/*
* We have only read-only mappings. Let's see if there
@@ -1884,82 +1884,82 @@
*/
for (; pv; pv = SLIST_NEXT(pv, pv_link)) {
if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) {
- if (pg->mdpage.pvh_attrs & PVF_KMPAGE)
+ if (md->pvh_attrs & PVF_KMPAGE)
bad_alias = true;
break;
}
}
- pg->mdpage.pvh_attrs &= ~PVF_WRITE;
+ md->pvh_attrs &= ~PVF_WRITE;
/*
* No KMPAGE and we exited early, so we must have
* multiple color mappings.
*/
if (!bad_alias && pv != NULL)
- pg->mdpage.pvh_attrs |= PVF_MULTCLR;
+ md->pvh_attrs |= PVF_MULTCLR;
}
/* If no conflicting colors, set everything back to cached */
if (!bad_alias) {
#ifdef DEBUG
- if ((pg->mdpage.pvh_attrs & PVF_WRITE)
+ if ((md->pvh_attrs & PVF_WRITE)
|| ro_mappings < 2) {
- SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
+ SLIST_FOREACH(pv, &md->pvh_list, pv_link)
KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
}
#endif
- pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
- pg->mdpage.pvh_attrs |= tst_mask | PVF_COLORED;
+ md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
+ md->pvh_attrs |= tst_mask | PVF_COLORED;
/*
* Restore DIRTY bit if page is modified
*/
- if (pg->mdpage.pvh_attrs & PVF_DMOD)
- pg->mdpage.pvh_attrs |= PVF_DIRTY;
+ if (md->pvh_attrs & PVF_DMOD)
+ md->pvh_attrs |= PVF_DIRTY;