Alpha pmap(9) unmanaged P->V tracking



The attached patch attempts to teach the Alpha pmap(9) how to track
P->V mappings for unmanaged pages (physical addresses with no struct
vm_page), which drm graphics drivers need.  However, I have no way to
test it.

Would anyone like to pick this up, dust it off, and commit it?
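
For context, here is roughly how an unmanaged-memory consumer such as a
drm driver is expected to use the interface this patch wires up.  This
is only a sketch: the exdrm_* names and the aperture arguments are made
up for illustration; pmap_pv_track()/pmap_pv_untrack() are the MI
routines from uvm/pmap/pmap_pvt.c (which the files.alpha change below
compiles in), and pmap_pv_protect() is the new MD routine added to
pmap.c below.

/*
 * Sketch only: the exdrm_* names and the aperture range are
 * hypothetical.  pmap_pv_track()/pmap_pv_untrack() are the MI API
 * (uvm/pmap/pmap_pvt.c); pmap_pv_protect() is added by this patch.
 */
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <uvm/pmap/pmap_pvt.h>

void
exdrm_attach(paddr_t aperture_pa, psize_t aperture_sz)
{
	/*
	 * Register the device pages with the pmap so it keeps P->V
	 * lists for them even though they have no struct vm_page.
	 */
	pmap_pv_track(aperture_pa, aperture_sz);
}

void
exdrm_revoke_page(paddr_t pa)
{
	/*
	 * Tear down every mapping of one tracked page, e.g. when the
	 * backing object moves; this ends up in the new
	 * pmap_pv_protect() added below.
	 */
	pmap_pv_protect(pa, VM_PROT_NONE);
}

void
exdrm_detach(paddr_t aperture_pa, psize_t aperture_sz)
{
	/* Stop tracking the range once all mappings are gone. */
	pmap_pv_untrack(aperture_pa, aperture_sz);
}
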
From 0a808adabd7a88d0511d7cd863e0c190e807f77e Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Tue, 19 Jul 2022 22:07:05 +0000
Subject: [PATCH] WIP: alpha: Add pmap_pv_track support.

---
 sys/arch/alpha/alpha/pmap.c     | 184 ++++++++++++++++++++------------
 sys/arch/alpha/conf/files.alpha |   1 +
 sys/arch/alpha/include/pmap.h   |  42 +++++---
 3 files changed, 141 insertions(+), 86 deletions(-)

diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index df6f5892d39b..f11e56d7eb54 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -346,7 +346,7 @@ static u_int	pmap_max_asn __read_mostly;
  *		tlb_lock -> pmap activation lock
  *
  *	* pvh_lock (global hash) - These locks protect the PV lists for
- *	  managed pages.
+ *	  managed or tracked pages.
  *
  *	* tlb_lock - This IPL_VM lock serializes local and remote TLB
  *	  invalidation.
@@ -390,13 +390,13 @@ static union {
 	uint8_t		pad[COHERENCY_UNIT];
 } pmap_pvh_locks[64] __cacheline_aligned;
 
-#define	PVH_LOCK_HASH(pg)						\
-	((((uintptr_t)(pg)) >> 6) & 63)
+#define	PVH_LOCK_HASH(pp)						\
+	((((uintptr_t)(pp)) >> 6) & 63)
 
 static inline kmutex_t *
-pmap_pvh_lock(struct vm_page *pg)
+pmap_pvh_lock(struct pmap_page *pp)
 {
-	return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock;
+	return &pmap_pvh_locks[PVH_LOCK_HASH(pp)].lock;
 }
 
 static union {
@@ -455,7 +455,7 @@ static void	alpha_protection_init(void);
 static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool,
 				      pv_entry_t *,
 				      struct pmap_tlb_context *);
-static void	pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t,
+static void	pmap_changebit(struct pmap_page *, pt_entry_t, pt_entry_t,
 			       struct pmap_tlb_context *);
 
 /*
@@ -482,10 +482,10 @@ static int	pmap_l1pt_ctor(void *, void *, int);
 /*
  * PV table management functions.
  */
-static int	pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *,
-			      bool, pv_entry_t);
-static void	pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool,
-			       pv_entry_t *, struct pmap_tlb_context *);
+static int	pmap_pv_enter(pmap_t, struct pmap_page *, vaddr_t,
+		    pt_entry_t *, bool, pv_entry_t);
+static void	pmap_pv_remove(pmap_t, struct pmap_page *, vaddr_t, bool,
+		    pv_entry_t *, struct pmap_tlb_context *);
 static void	*pmap_pv_page_alloc(struct pool *, int);
 static void	pmap_pv_page_free(struct pool *, void *);
 
@@ -1401,7 +1401,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
 		mutex_init(&pmap_pmap_locks[i].locks.activation_lock,
 		    MUTEX_SPIN, IPL_SCHED);
 	}
-	
+
 	/*
 	 * This must block any interrupt from which a TLB shootdown
 	 * could be issued, but must NOT block IPIs.
@@ -1867,6 +1867,8 @@ pmap_remove_all(pmap_t pmap)
 {
 	struct pmap_tlb_context tlbctx;
 	struct vm_page *pg;
+	struct pmap_page *pp;
+	paddr_t pa;
 	pv_entry_t pv;
 
 	KASSERT(pmap != pmap_kernel());
@@ -1906,8 +1908,13 @@ pmap_remove_all(pmap_t pmap)
 	/* Step 3 */
 	while ((pv = LIST_FIRST(&pmap->pm_pvents)) != NULL) {
 		KASSERT(pv->pv_pmap == pmap);
-		pmap_pv_remove(pmap, PHYS_TO_VM_PAGE(pmap_pte_pa(pv->pv_pte)),
-		    pv->pv_va, true, NULL, &tlbctx);
+		pa = pmap_pte_pa(pv->pv_pte);
+		if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL)
+			pp = &VM_PAGE_TO_MD(pg)->mdp_pp;
+		else
+			pp = pmap_pv_tracked(pa);
+		KASSERT(pp != NULL);
+		pmap_pv_remove(pmap, pp, pv->pv_va, true, NULL, &tlbctx);
 	}
 
 	/* Step 4 */
@@ -1926,14 +1933,8 @@ pmap_remove_all(pmap_t pmap)
 	return true;
 }
 
-/*
- * pmap_page_protect:		[ INTERFACE ]
- *
- *	Lower the permission for all mappings to a given page to
- *	the permissions specified.
- */
-void
-pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+static void
+pmap_pp_protect(struct pmap_page *pp, vm_prot_t prot)
 {
 	pv_entry_t pv, nextpv;
 	pt_entry_t opte;
@@ -1943,7 +1944,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 #ifdef DEBUG
 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
-		printf("pmap_page_protect(%p, %x)\n", pg, prot);
+		printf("pmap_pp_protect(%p, %x)\n", pp, prot);
 #endif
 
 	pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
@@ -1957,9 +1958,9 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 	case VM_PROT_READ|VM_PROT_EXECUTE:
 	case VM_PROT_READ:
 		PMAP_HEAD_TO_MAP_LOCK();
-		lock = pmap_pvh_lock(pg);
+		lock = pmap_pvh_lock(pp);
 		mutex_enter(lock);
-		for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) {
+		for (pv = PMAP_PAGE_PVS(pp); pv != NULL; pv = pv->pv_next) {
 			PMAP_LOCK(pv->pv_pmap);
 			opte = atomic_load_relaxed(pv->pv_pte);
 			if (opte & (PG_KWE | PG_UWE)) {
@@ -1982,9 +1983,9 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 	}
 
 	PMAP_HEAD_TO_MAP_LOCK();
-	lock = pmap_pvh_lock(pg);
+	lock = pmap_pvh_lock(pp);
 	mutex_enter(lock);
-	for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = nextpv) {
+	for (pv = PMAP_PAGE_PVS(pp); pv != NULL; pv = nextpv) {
 		pt_entry_t pte_bits;
 		pmap_t pmap;
 		vaddr_t va;
@@ -2006,6 +2007,29 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 	TLB_COUNT(reason_page_protect_none);
 }
 
+/*
+ * pmap_page_protect:		[ INTERFACE ]
+ *
+ *	Lower the permission for all mappings to a given page to
+ *	the permissions specified.
+ */
+void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+	pmap_pp_protect(&md->mdp_pp, prot);
+}
+
+void
+pmap_pv_protect(paddr_t pa, vm_prot_t prot)
+{
+	struct pmap_page * const pp = pmap_pv_tracked(pa);
+
+	KASSERT(pp != NULL);
+	pmap_pp_protect(pp, prot);
+}
+
 /*
  * pmap_protect:		[ INTERFACE ]
  *
@@ -2176,8 +2200,14 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 		       pmap, va, pa, prot, flags);
 #endif
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
+	struct pmap_page *pp;
 	const bool wired = (flags & PMAP_WIRED) != 0;
 
+	if (pg != NULL)
+		pp = &VM_PAGE_TO_MD(pg)->mdp_pp;
+	else
+		pp = pmap_pv_tracked(pa);
+
 	PMAP_MAP_TO_HEAD_LOCK();
 	PMAP_LOCK(pmap);
 
@@ -2314,8 +2344,8 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 
  validate_enterpv:
 	/* Enter the mapping into the pv_table if appropriate. */
-	if (pg != NULL) {
-		error = pmap_pv_enter(pmap, pg, va, pte, true, opv);
+	if (pp != NULL) {
+		error = pmap_pv_enter(pmap, pp, va, pte, true, opv);
 		if (error) {
 			/* This can only fail if opv == NULL */
 			KASSERT(opv == NULL);
@@ -2340,8 +2370,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
  validate:
 	/* Build the new PTE. */
 	npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V;
-	if (pg != NULL) {
-		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	if (pp != NULL) {
 		uintptr_t attrs = 0;
 
 		KASSERT(((flags & VM_PROT_ALL) & ~prot) == 0);
@@ -2351,9 +2380,9 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 		else if (flags & VM_PROT_ALL)
 			attrs |= PGA_REFERENCED;
 
-		lock = pmap_pvh_lock(pg);
+		lock = pmap_pvh_lock(pp);
 		mutex_enter(lock);
-		attrs = (md->pvh_listx |= attrs);
+		attrs = (pp->pp_listx |= attrs);
 		mutex_exit(lock);
 
 		/* Set up referenced/modified emulation for new mapping. */
@@ -2799,6 +2828,7 @@ bool
 pmap_clear_modify(struct vm_page *pg)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	struct pmap_page * const pp = &md->mdp_pp;
 	bool rv = false;
 	kmutex_t *lock;
 	struct pmap_tlb_context tlbctx;
@@ -2811,13 +2841,13 @@ pmap_clear_modify(struct vm_page *pg)
 	pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
 
 	PMAP_HEAD_TO_MAP_LOCK();
-	lock = pmap_pvh_lock(pg);
+	lock = pmap_pvh_lock(pp);
 	mutex_enter(lock);
 
-	if (md->pvh_listx & PGA_MODIFIED) {
+	if (pp->pp_listx & PGA_MODIFIED) {
 		rv = true;
-		pmap_changebit(pg, PG_FOW, ~0UL, &tlbctx);
-		md->pvh_listx &= ~PGA_MODIFIED;
+		pmap_changebit(pp, PG_FOW, ~0UL, &tlbctx);
+		pp->pp_listx &= ~PGA_MODIFIED;
 	}
 
 	mutex_exit(lock);
@@ -2838,6 +2868,7 @@ bool
 pmap_clear_reference(struct vm_page *pg)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	struct pmap_page * const pp = &md->mdp_pp;
 	bool rv = false;
 	kmutex_t *lock;
 	struct pmap_tlb_context tlbctx;
@@ -2850,13 +2881,13 @@ pmap_clear_reference(struct vm_page *pg)
 	pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
 
 	PMAP_HEAD_TO_MAP_LOCK();
-	lock = pmap_pvh_lock(pg);
+	lock = pmap_pvh_lock(pp);
 	mutex_enter(lock);
 
-	if (md->pvh_listx & PGA_REFERENCED) {
+	if (pp->pp_listx & PGA_REFERENCED) {
 		rv = true;
-		pmap_changebit(pg, PG_FOR | PG_FOW | PG_FOE, ~0UL, &tlbctx);
-		md->pvh_listx &= ~PGA_REFERENCED;
+		pmap_changebit(pp, PG_FOR | PG_FOW | PG_FOE, ~0UL, &tlbctx);
+		pp->pp_listx &= ~PGA_REFERENCED;
 	}
 
 	mutex_exit(lock);
@@ -2964,7 +2995,8 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte,
 {
 	pt_entry_t opte;
 	paddr_t pa;
-	struct vm_page *pg;		/* if != NULL, page is managed */
+	struct vm_page *pg;
+	struct pmap_page *pp;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
@@ -3018,9 +3050,12 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte,
 		/*
 		 * Remove it from the PV table.
 		 */
-		pg = PHYS_TO_VM_PAGE(pa);
-		KASSERT(pg != NULL);
-		pmap_pv_remove(pmap, pg, va, dolock, opvp, tlbctx);
+		if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL)
+			pp = &VM_PAGE_TO_MD(pg)->mdp_pp;
+		else
+			pp = pmap_pv_tracked(pa);
+		KASSERT(pp != NULL);
+		pmap_pv_remove(pmap, pp, va, dolock, opvp, tlbctx);
 		KASSERT(opvp == NULL || *opvp != NULL);
 	}
 
@@ -3038,7 +3073,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte,
  *	the pmaps as we encounter them.
  */
 static void
-pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t mask,
+pmap_changebit(struct pmap_page *pp, pt_entry_t set, pt_entry_t mask,
     struct pmap_tlb_context * const tlbctx)
 {
 	pv_entry_t pv;
@@ -3047,13 +3082,13 @@ pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t mask,
 #ifdef DEBUG
 	if (pmapdebug & PDB_BITS)
 		printf("pmap_changebit(%p, 0x%lx, 0x%lx)\n",
-		    pg, set, mask);
+		    pp, set, mask);
 #endif
 
 	/*
 	 * Loop over all current mappings setting/clearing as apropos.
 	 */
-	for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) {
+	for (pv = PMAP_PAGE_PVS(pp); pv != NULL; pv = pv->pv_next) {
 		PMAP_LOCK(pv->pv_pmap);
 
 		pte = pv->pv_pte;
@@ -3082,6 +3117,7 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type)
 	struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
 	pt_entry_t faultoff, *pte;
 	struct vm_page *pg;
+	struct pmap_page *pp;
 	paddr_t pa;
 	bool didlock = false;
 	bool exec = false;
@@ -3157,27 +3193,29 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type)
 	 * 	(1) always mark page as used, and
 	 *	(2) if it was a write fault, mark page as modified.
 	 */
-	pg = PHYS_TO_VM_PAGE(pa);
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-	struct pmap_tlb_context tlbctx;
+	if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL)
+		pp = &VM_PAGE_TO_MD(pg)->mdp_pp;
+	else
+		pp = pmap_pv_tracked(pa);
 
+	struct pmap_tlb_context tlbctx;
 	pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
 
 	PMAP_HEAD_TO_MAP_LOCK();
-	lock = pmap_pvh_lock(pg);
+	lock = pmap_pvh_lock(pp);
 	mutex_enter(lock);
 
 	if (type == ALPHA_MMCSR_FOW) {
-		md->pvh_listx |= (PGA_REFERENCED|PGA_MODIFIED);
+		pp->pp_listx |= (PGA_REFERENCED|PGA_MODIFIED);
 		faultoff = PG_FOR | PG_FOW;
 	} else {
-		md->pvh_listx |= PGA_REFERENCED;
+		pp->pp_listx |= PGA_REFERENCED;
 		faultoff = PG_FOR;
 		if (exec) {
 			faultoff |= PG_FOE;
 		}
 	}
-	pmap_changebit(pg, 0, ~faultoff, &tlbctx);
+	pmap_changebit(pp, 0, ~faultoff, &tlbctx);
 
 	mutex_exit(lock);
 	PMAP_HEAD_TO_MAP_UNLOCK();
@@ -3198,18 +3236,24 @@ void
 pmap_pv_dump(paddr_t pa)
 {
 	struct vm_page *pg;
-	struct vm_page_md *md;
+	struct pmap_page *pp;
 	pv_entry_t pv;
 	kmutex_t *lock;
 
-	pg = PHYS_TO_VM_PAGE(pa);
-	md = VM_PAGE_TO_MD(pg);
+	if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL)
+		pp = &VM_PAGE_TO_MD(pg)->mdp_pp;
+	else
+		pp = pmap_pv_tracked(pa);
+	if (pp == NULL) {
+		printf("pa 0x%lx: unmanaged and untracked -- no P->V\n", pa);
+		return;
+	}
 
-	lock = pmap_pvh_lock(pg);
+	lock = pmap_pvh_lock(pp);
 	mutex_enter(lock);
 
-	printf("pa 0x%lx (attrs = 0x%lx):\n", pa, md->pvh_listx & PGA_ATTRS);
-	for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next)
+	printf("pa 0x%lx (attrs = 0x%lx):\n", pa, pp->pp_listx & PGA_ATTRS);
+	for (pv = PMAP_PAGE_PVS(pp); pv != NULL; pv = pv->pv_next)
 		printf("     pmap %p, va 0x%lx\n",
 		    pv->pv_pmap, pv->pv_va);
 	printf("\n");
@@ -3268,10 +3312,9 @@ vtophys(vaddr_t const vaddr)
  *	Add a physical->virtual entry to the pv_table.
  */
 static int
-pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
+pmap_pv_enter(pmap_t pmap, struct pmap_page *pp, vaddr_t va, pt_entry_t *pte,
     bool dolock, pv_entry_t newpv)
 {
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	kmutex_t *lock;
 
 	/*
@@ -3287,7 +3330,7 @@ pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
 	newpv->pv_pte = pte;
 
 	if (dolock) {
-		lock = pmap_pvh_lock(pg);
+		lock = pmap_pvh_lock(pp);
 		mutex_enter(lock);
 	}
 
@@ -3297,7 +3340,7 @@ pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
 	/*
 	 * Make sure the entry doesn't already exist.
 	 */
-	for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) {
+	for (pv = PMAP_PAGE_PVS(pp); pv != NULL; pv = pv->pv_next) {
 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
 			printf("pmap = %p, va = 0x%lx\n", pmap, va);
 			panic("pmap_pv_enter: already in pv table");
@@ -3309,9 +3352,9 @@ pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
 	/*
 	 * ...and put it in the list.
 	 */
-	uintptr_t const attrs = md->pvh_listx & PGA_ATTRS;
-	newpv->pv_next = (struct pv_entry *)(md->pvh_listx & ~PGA_ATTRS);
-	md->pvh_listx = (uintptr_t)newpv | attrs;
+	uintptr_t const attrs = pp->pp_listx & PGA_ATTRS;
+	newpv->pv_next = (struct pv_entry *)(pp->pp_listx & ~PGA_ATTRS);
+	pp->pp_listx = (uintptr_t)newpv | attrs;
 	LIST_INSERT_HEAD(&pmap->pm_pvents, newpv, pv_link);
 
 	if (dolock) {
@@ -3327,15 +3370,14 @@ pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
  *	Remove a physical->virtual entry from the pv_table.
  */
 static void
-pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock,
+pmap_pv_remove(pmap_t pmap, struct pmap_page *pp, vaddr_t va, bool dolock,
     pv_entry_t *opvp, struct pmap_tlb_context * const tlbctx)
 {
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, *pvp;
 	kmutex_t *lock;
 
 	if (dolock) {
-		lock = pmap_pvh_lock(pg);
+		lock = pmap_pvh_lock(pp);
 		mutex_enter(lock);
 	} else {
 		lock = NULL; /* XXX stupid gcc */
@@ -3344,7 +3386,7 @@ pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock,
 	/*
 	 * Find the entry to remove.
 	 */
-	for (pvp = (struct pv_entry **)&md->pvh_listx, pv = VM_MDPAGE_PVS(pg);
+	for (pvp = (struct pv_entry **)&pp->pp_listx, pv = PMAP_PAGE_PVS(pp);
 	     pv != NULL; pvp = &pv->pv_next, pv = *pvp)
 		if (pmap == pv->pv_pmap && va == pv->pv_va)
 			break;
diff --git a/sys/arch/alpha/conf/files.alpha b/sys/arch/alpha/conf/files.alpha
index 82604ac1fe90..a856d64a488a 100644
--- a/sys/arch/alpha/conf/files.alpha
+++ b/sys/arch/alpha/conf/files.alpha
@@ -394,6 +394,7 @@ file	arch/alpha/common/bus_dma.c
 file	arch/alpha/common/comlogout.c
 file	dev/cons.c
 file	kern/kern_cctr.c
+file	uvm/pmap/pmap_pvt.c
 
 file	dev/bus_dma/bus_dmamem_common.c
 
diff --git a/sys/arch/alpha/include/pmap.h b/sys/arch/alpha/include/pmap.h
index bc699505d03e..5f6429acdf00 100644
--- a/sys/arch/alpha/include/pmap.h
+++ b/sys/arch/alpha/include/pmap.h
@@ -182,12 +182,12 @@ typedef struct pv_entry {
 	pt_entry_t	*pv_pte;	/* PTE that maps the VA */
 } *pv_entry_t;
 
-/* attrs in pvh_listx */
+/* attrs in pp_listx */
 #define	PGA_MODIFIED		0x01UL		/* modified */
 #define	PGA_REFERENCED		0x02UL		/* referenced */
 #define	PGA_ATTRS		(PGA_MODIFIED | PGA_REFERENCED)
 
-/* pvh_usage */
+/* pmap_physpage_alloc usage */
 #define	PGU_NORMAL		0		/* free or normal use */
 #define	PGU_PVENT		1		/* PV entries */
 #define	PGU_L1PT		2		/* level 1 page table */
@@ -213,9 +213,9 @@ void	pmap_tlb_shootdown_ipi(struct cpu_info *, struct trapframe *);
 #define	pmap_update(pmap)		/* nothing (yet) */
 
 #define	pmap_is_referenced(pg)						\
-	(((pg)->mdpage.pvh_listx & PGA_REFERENCED) != 0)
+	(((pg)->mdpage.mdp_pp.pp_listx & PGA_REFERENCED) != 0)
 #define	pmap_is_modified(pg)						\
-	(((pg)->mdpage.pvh_listx & PGA_MODIFIED) != 0)
+	(((pg)->mdpage.mdp_pp.pp_listx & PGA_MODIFIED) != 0)
 
 #define	PMAP_STEAL_MEMORY		/* enable pmap_steal_memory() */
 #define	PMAP_GROWKERNEL			/* enable pmap_growkernel() */
@@ -353,12 +353,19 @@ do {									\
 	}								\
 } while (0)
 
+/*
+ * pv tracking
+ */
+struct pmap_page {
+	uintptr_t pp_listx;		/* pv_entry list + attrs */
+};
+
 /*
  * pmap-specific data store in the vm_page structure.
  */
 #define	__HAVE_VM_PAGE_MD
 struct vm_page_md {
-	uintptr_t pvh_listx;		/* pv_entry list + attrs */
+	struct pmap_page mdp_pp;
 	/*
 	 * XXX These fields are only needed for pages that are used
 	 * as PT pages.  It would be nice to find safely-unused fields
@@ -368,28 +375,33 @@ struct vm_page_md {
 	 * 0-1025 ... 1025 because sometimes we need to take an extra
 	 * reference temporarily in pmap_enter().)
 	 */
-	unsigned int pvh_physpgrefs;	/* # refs as a PT page */
-	unsigned int pvh_spare0;	/* XXX spare field */
+	unsigned int mdp_physpgrefs;	/* # refs as a PT page */
+	unsigned int mdp_spare0;	/* XXX spare field */
 };
 
 /* Reference counting for page table pages. */
 #define	PHYSPAGE_REFCNT(pg)						\
-	atomic_load_relaxed(&(pg)->mdpage.pvh_physpgrefs)
+	atomic_load_relaxed(&(pg)->mdpage.mdp_physpgrefs)
 #define	PHYSPAGE_REFCNT_SET(pg, v)					\
-	atomic_store_relaxed(&(pg)->mdpage.pvh_physpgrefs, (v))
+	atomic_store_relaxed(&(pg)->mdpage.mdp_physpgrefs, (v))
 #define	PHYSPAGE_REFCNT_INC(pg)						\
-	atomic_inc_uint_nv(&(pg)->mdpage.pvh_physpgrefs)
+	atomic_inc_uint_nv(&(pg)->mdpage.mdp_physpgrefs)
 #define	PHYSPAGE_REFCNT_DEC(pg)						\
-	atomic_dec_uint_nv(&(pg)->mdpage.pvh_physpgrefs)
+	atomic_dec_uint_nv(&(pg)->mdpage.mdp_physpgrefs)
 
-#define	VM_MDPAGE_PVS(pg)						\
-	((struct pv_entry *)((pg)->mdpage.pvh_listx & ~3UL))
+#define	PMAP_PAGE_PVS(pp)						\
+	((struct pv_entry *)((pp)->pp_listx & ~3UL))
 
-#define	VM_MDPAGE_INIT(pg)						\
+#define	PMAP_PAGE_INIT(pp)						\
 do {									\
-	(pg)->mdpage.pvh_listx = 0UL;					\
+	(pp)->pp_listx = 0UL;						\
 } while (/*CONSTCOND*/0)
 
+#define	VM_MDPAGE_INIT(pg)						\
+	PMAP_PAGE_INIT(&(pg)->mdpage.mdp_pp)
+
+#include <uvm/pmap/pmap_pvt.h>
+
 #endif /* _KERNEL */
 
 #endif /* _PMAP_MACHINE_ */
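
One possible cleanup while dusting this off: the managed-vs-tracked
lookup (try PHYS_TO_VM_PAGE() first, fall back to pmap_pv_tracked())
is repeated in pmap_remove_all(), pmap_remove_mapping(), pmap_enter(),
pmap_emulate_reference(), and pmap_pv_dump().  It could be factored
into a small helper; a sketch, using the hypothetical name
pmap_pp_lookup():

/*
 * Hypothetical helper, not part of the patch above: resolve a physical
 * address to its struct pmap_page, whether the page is managed (has a
 * struct vm_page) or merely pv-tracked.  Returns NULL if it is neither.
 */
static inline struct pmap_page *
pmap_pp_lookup(paddr_t pa)
{
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);

	if (pg != NULL)
		return &VM_PAGE_TO_MD(pg)->mdp_pp;
	return pmap_pv_tracked(pa);
}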

