Source-Changes-HG archive
[src/trunk]: src/sys/arch Use VM_PAGE_TO_MD() to locate struct vm_page_md. No functional changes.
details: https://anonhg.NetBSD.org/src/rev/2beecdb2b038
branches: trunk
changeset: 758276:2beecdb2b038
user: uebayasi <uebayasi@NetBSD.org>
date: Sat Oct 30 18:08:48 2010 +0000
description:
Use VM_PAGE_TO_MD() to locate struct vm_page_md. No functional
changes.
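For context before the diff, here is a minimal standalone sketch of the accessor
pattern the change applies. VM_PAGE_TO_MD(), mdpage, and mdpg_attrs are taken
from the hunks below; the simplified struct layouts and the
old_attr_fetch()/new_attr_fetch() helpers are hypothetical illustrations, not
the real NetBSD definitions:

	/* Machine-dependent per-page data, as each pmap defines it
	 * (assumed minimal layout for illustration). */
	struct vm_page_md {
		int mdpg_attrs;
	};

	/* Machine-independent page structure embedding the MD part
	 * (simplified). */
	struct vm_page {
		struct vm_page_md mdpage;
		/* ... other MI fields ... */
	};

	/* The accessor introduced by this change. */
	#define VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)

	/* Before: callers reached into pg->mdpage directly. */
	static inline int
	old_attr_fetch(struct vm_page *pg)
	{
		return pg->mdpage.mdpg_attrs;
	}

	/* After: callers locate the MD part through VM_PAGE_TO_MD(),
	 * so how struct vm_page_md is found can change later without
	 * editing every caller. */
	static inline int
	new_attr_fetch(struct vm_page *pg)
	{
		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

		return md->mdpg_attrs;
	}

Routing callers through the macro presumably hides where struct vm_page_md
lives, so its location relative to struct vm_page can change later without
another tree-wide edit; the hunks below are exactly this mechanical
substitution.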
diffstat:
sys/arch/powerpc/oea/pmap.c | 26 ++++++++++++++++++------
sys/arch/powerpc/powerpc/pmap_subr.c | 20 +++++++++++-------
sys/arch/sh3/sh3/pmap.c | 38 ++++++++++++++++++++---------------
3 files changed, 53 insertions(+), 31 deletions(-)
diffs (289 lines):
diff -r ea1ee8294976 -r 2beecdb2b038 sys/arch/powerpc/oea/pmap.c
--- a/sys/arch/powerpc/oea/pmap.c Sat Oct 30 17:44:04 2010 +0000
+++ b/sys/arch/powerpc/oea/pmap.c Sat Oct 30 18:08:48 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.71 2010/02/25 23:31:47 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.72 2010/10/30 18:08:48 uebayasi Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -63,7 +63,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.71 2010/02/25 23:31:47 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.72 2010/10/30 18:08:48 uebayasi Exp $");
#define PMAP_NOOPNAMES
@@ -91,6 +91,8 @@
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>
+#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage)
+
#ifdef ALTIVEC
int pmap_use_altivec;
#endif
@@ -690,38 +692,48 @@
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
struct vm_page *pg;
+ struct vm_page_md *md;
pg = PHYS_TO_VM_PAGE(pa);
if (pg_p != NULL)
*pg_p = pg;
if (pg == NULL)
return &pmap_pvo_unmanaged;
- return &pg->mdpage.mdpg_pvoh;
+ md = VM_PAGE_TO_MD(pg);
+ return &md->mdpg_pvoh;
}
static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
- return &pg->mdpage.mdpg_pvoh;
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+ return &md->mdpg_pvoh;
}
static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
- pg->mdpage.mdpg_attrs &= ~ptebit;
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+ md->mdpg_attrs &= ~ptebit;
}
static inline int
pmap_attr_fetch(struct vm_page *pg)
{
- return pg->mdpage.mdpg_attrs;
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+ return md->mdpg_attrs;
}
static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
- pg->mdpage.mdpg_attrs |= ptebit;
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+ md->mdpg_attrs |= ptebit;
}
static inline int
diff -r ea1ee8294976 -r 2beecdb2b038 sys/arch/powerpc/powerpc/pmap_subr.c
--- a/sys/arch/powerpc/powerpc/pmap_subr.c Sat Oct 30 17:44:04 2010 +0000
+++ b/sys/arch/powerpc/powerpc/pmap_subr.c Sat Oct 30 18:08:48 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_subr.c,v 1.22 2010/01/28 12:37:45 phx Exp $ */
+/* $NetBSD: pmap_subr.c,v 1.23 2010/10/30 18:08:48 uebayasi Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_subr.c,v 1.22 2010/01/28 12:37:45 phx Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_subr.c,v 1.23 2010/10/30 18:08:48 uebayasi Exp $");
#include "opt_multiprocessor.h"
#include "opt_altivec.h"
@@ -51,6 +51,8 @@
#endif
#include <powerpc/psl.h>
+#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage)
+
#define MFMSR() mfmsr()
#define MTMSR(psl) __asm volatile("sync; mtmsr %0; isync" :: "r"(psl))
@@ -292,14 +294,15 @@
* of this page since the page contents will have changed.
*/
struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
KDASSERT(pg != NULL);
- KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
+ KDASSERT(LIST_EMPTY(&md->mdpg_pvoh));
#ifdef PMAPCOUNTERS
- if (pg->mdpage.mdpg_attrs & PTE_EXEC) {
+ if (md->mdpg_attrs & PTE_EXEC) {
PMAPCOUNT(exec_uncached_zero_page);
}
#endif
- pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
+ md->mdpg_attrs &= ~PTE_EXEC;
}
#endif
@@ -372,14 +375,15 @@
* changed.
*/
struct vm_page *pg = PHYS_TO_VM_PAGE(dst);
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
KDASSERT(pg != NULL);
- KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
+ KDASSERT(LIST_EMPTY(&md->mdpg_pvoh));
#ifdef PMAPCOUNTERS
- if (pg->mdpage.mdpg_attrs & PTE_EXEC) {
+ if (md->mdpg_attrs & PTE_EXEC) {
PMAPCOUNT(exec_uncached_copy_page);
}
#endif
- pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
+ md->mdpg_attrs &= ~PTE_EXEC;
}
#endif
diff -r ea1ee8294976 -r 2beecdb2b038 sys/arch/sh3/sh3/pmap.c
--- a/sys/arch/sh3/sh3/pmap.c Sat Oct 30 17:44:04 2010 +0000
+++ b/sys/arch/sh3/sh3/pmap.c Sat Oct 30 18:08:48 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.74 2009/11/07 07:27:46 cegger Exp $ */
+/* $NetBSD: pmap.c,v 1.75 2010/10/30 18:15:04 uebayasi Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.74 2009/11/07 07:27:46 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.75 2010/10/30 18:15:04 uebayasi Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -43,6 +43,8 @@
#include <sh3/mmu.h>
#include <sh3/cache.h>
+#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage)
+
#ifdef DEBUG
#define STATIC
#else
@@ -339,7 +341,7 @@
entry |= _PG_WIRED;
if (pg != NULL) { /* memory-space */
- pvh = &pg->mdpage;
+ pvh = VM_PAGE_TO_MD(pg);
entry |= PG_C; /* always cached */
/* Seed modified/reference tracking */
@@ -483,7 +485,7 @@
* XXX mapping them uncached (like arm and mips do).
*/
again:
- pvh = &pg->mdpage;
+ pvh = VM_PAGE_TO_MD(pg);
SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
if (sh_cache_indexof(va) !=
sh_cache_indexof(pv->pv_va)) {
@@ -495,7 +497,7 @@
}
/* Register pv map */
- pvh = &pg->mdpage;
+ pvh = VM_PAGE_TO_MD(pg);
pv = __pmap_pv_alloc();
pv->pv_pmap = pmap;
pv->pv_va = va;
@@ -547,12 +549,12 @@
int s;
s = splvm();
- pvh = &pg->mdpage;
+ pvh = VM_PAGE_TO_MD(pg);
SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
if (pv->pv_pmap == pmap && pv->pv_va == vaddr) {
if (SH_HAS_VIRTUAL_ALIAS ||
(SH_HAS_WRITEBACK_CACHE &&
- (pg->mdpage.pvh_flags & PVH_MODIFIED))) {
+ (pvh->pvh_flags & PVH_MODIFIED))) {
/*
* Always use index ops. since I don't want to
* worry about address space.
@@ -699,7 +701,7 @@
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- struct vm_page_md *pvh = &pg->mdpage;
+ struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
struct pv_entry *pv;
struct pmap *pmap;
vaddr_t va;
@@ -789,24 +791,25 @@
bool
pmap_is_referenced(struct vm_page *pg)
{
+ struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
- return ((pg->mdpage.pvh_flags & PVH_REFERENCED) ? true : false);
+ return ((pvh->pvh_flags & PVH_REFERENCED) ? true : false);
}
bool
pmap_clear_reference(struct vm_page *pg)
{
- struct vm_page_md *pvh = &pg->mdpage;
+ struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
struct pv_entry *pv;
pt_entry_t *pte;
pmap_t pmap;
vaddr_t va;
int s;
- if ((pg->mdpage.pvh_flags & PVH_REFERENCED) == 0)
+ if ((pvh->pvh_flags & PVH_REFERENCED) == 0)
return (false);
- pg->mdpage.pvh_flags &= ~PVH_REFERENCED;
+ pvh->pvh_flags &= ~PVH_REFERENCED;
s = splvm();
/* Restart reference bit emulation */
@@ -831,14 +834,15 @@
bool
pmap_is_modified(struct vm_page *pg)
{
+ struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
- return ((pg->mdpage.pvh_flags & PVH_MODIFIED) ? true : false);
+ return ((pvh->pvh_flags & PVH_MODIFIED) ? true : false);
}
bool
pmap_clear_modify(struct vm_page *pg)
{
- struct vm_page_md *pvh = &pg->mdpage;
+ struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
struct pv_entry *pv;
struct pmap *pmap;
pt_entry_t *pte, entry;
@@ -1023,12 +1027,14 @@
/* Emulate reference/modified tracking for managed page. */
if (flags != 0 && (pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL) {
+ struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
+
if (flags & PVH_REFERENCED) {
- pg->mdpage.pvh_flags |= PVH_REFERENCED;
+ pvh->pvh_flags |= PVH_REFERENCED;
entry |= PG_V;
}
if (flags & PVH_MODIFIED) {
- pg->mdpage.pvh_flags |= PVH_MODIFIED;
+ pvh->pvh_flags |= PVH_MODIFIED;
entry |= PG_D;
}
*pte = entry;