Current-Users archive


Status of current on ARM?

What is the general status of netbsd-current on ARM?

evbarm on OMAP 2420 has been broken since August 6th. Imre Deak brought
up the issues on port-arm, and later a PR was filed:
http://www.netbsd.org/cgi-bin/query-pr-single.pl?number=39791 .
An obvious workaround on OMAP 2420 is to revert a few changes (patches
attached), but I'm happy to test if anyone has better fixes.
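
For reference, this is roughly how I apply the attached patches on top
of a git conversion of src; the patch file names below are just
examples, use whatever names the attachments were saved under:

  $ cd src
  $ git am 0001-revert-pmap-corner-cases.patch \
           0002-revert-pv-entry-slist.patch

Since git am records each mail as a commit, dropping the workaround
later is just a git revert of those two commits.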

I would like to test and then post some Thumb mode fixes for the
kernel; they work with OMAP 2420 on a branch taken from current on
August 5th, but the broken kernel in current is blocking that work.
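
In case anyone wants the same baseline, it is roughly what a dated
anoncvs checkout gives (a sketch using the standard anoncvs setup;
adjust the server and date as needed):

  $ cvs -d anoncvs@anoncvs.NetBSD.org:/cvsroot checkout -D "2008-08-05" src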

Thanks,

-Mikko
From 26766c0a6a57834ade49058d234abed1bffb4bec Mon Sep 17 00:00:00 2001
From: Mikko Rapeli <mikko.rapeli%teleca.com@localhost>
Date: Fri, 31 Oct 2008 12:01:52 +0200
Subject: [PATCH] Revert "Fix a few more corner cases. Always KMPAGE or pages 
with unmanaged writeable"

This reverts commit 1d3900d66f829eae25037a99917d2f6de341f6e1.
---
 sys/arch/arm/arm32/pmap.c         |  128 +++++++++++++++++--------------------
 sys/arch/arm/include/arm32/pmap.h |    3 -
 2 files changed, 58 insertions(+), 73 deletions(-)

diff --git a/sys/arch/arm/arm32/pmap.c b/sys/arch/arm/arm32/pmap.c
index c6247a2..c77ff9f 100644
--- a/sys/arch/arm/arm32/pmap.c
+++ b/sys/arch/arm/arm32/pmap.c
@@ -855,26 +855,20 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
        pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
 #ifdef PMAP_CACHE_VIPT
        /*
-        * Insert unmanaged entries, writeable first, at the head of
-        * the pv list.
+        * Insert unmapped entries at the head of the pv list.
         */
        if (__predict_true((flags & PVF_KENTRY) == 0)) {
                while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
                        pvp = &SLIST_NEXT(*pvp, pv_link);
-       } else if ((flags & PVF_WRITE) == 0) {
-               while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE)
-                       pvp = &SLIST_NEXT(*pvp, pv_link);
        }
 #endif
        SLIST_NEXT(pve, pv_link) = *pvp;                /* add to ... */
        *pvp = pve;                             /* ... locked list */
-       pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
 #ifdef PMAP_CACHE_VIPT
-       if ((pve->pv_flags & PVF_KWRITE) == PVF_KWRITE)
-               pg->mdpage.pvh_attrs |= PVF_KMOD;
-       if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
+       pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD | PVF_KENTRY);
+       if ((flags & PVF_MOD) && (pg->mdpage.pvh_attrs & PVF_NC) == 0)
                pg->mdpage.pvh_attrs |= PVF_DIRTY;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+       KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
        if (pm == pmap_kernel()) {
                PMAPCOUNT(kernel_mappings);
@@ -958,6 +952,18 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
                                        return (NULL);
                                --pm->pm_stats.wired_count;
                        }
+#ifdef PMAP_CACHE_VIPT
+                       /*
+                        * If we are removing the first pv entry and its
+                        * a KENTRY, if the next one isn't also a KENTER,
+                        * clear KENTRY from the page attributes.
+                        */
+                       if (SLIST_FIRST(&pg->mdpage.pvh_list) == pve
+                           && (pve->pv_flags & PVF_KENTRY)
+                           && (SLIST_NEXT(pve, pv_link) == NULL
+                           && (SLIST_NEXT(pve, pv_link)->pv_flags & PVF_KENTRY) == 0))
+                               pg->mdpage.pvh_attrs &= ~PVF_KENTRY;
+#endif
                        *prevptr = SLIST_NEXT(pve, pv_link);    /* remove it! */
                        if (pm == pmap_kernel()) {
                                PMAPCOUNT(kernel_unmappings);
@@ -998,21 +1004,18 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
 
 #ifdef PMAP_CACHE_VIPT
        /*
-        * If we no longer have a WRITEABLE KENTRY at the head of list,
-        * clear the KMOD attribute from the page.
-        */
-       if (SLIST_FIRST(&pg->mdpage.pvh_list) == NULL
-           || (SLIST_FIRST(&pg->mdpage.pvh_list)->pv_flags & PVF_KWRITE) == PVF_KWRITE)
-               pg->mdpage.pvh_attrs &= ~PVF_KMOD;
-
-       /*
         * If this was a writeable page and there are no more writeable
         * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
         * the contents to memory.
         */
-       if (pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0)
+       if ((pg->mdpage.pvh_attrs & PVF_WRITE)
+           && pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0) {
                pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+#if 0 /* XYY */
+               if ((pg->mdpage.pvh_attrs & PVF_NC) == 0)
+                       pmap_flush_page(pg, false);
+#endif
+       }
 #endif /* PMAP_CACHE_VIPT */
 
        return(pve);                            /* return removed pve */
@@ -1037,9 +1040,6 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
        struct pv_entry *npv;
        u_int flags, oflags;
 
-       KASSERT((clr_mask & PVF_KENTRY) == 0);
-       KASSERT((set_mask & PVF_KENTRY) == 0);
-
        if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
                return (0);
 
@@ -1053,9 +1053,9 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
        if (clr_mask & (PVF_REF | PVF_MOD)) {
                pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
 #ifdef PMAP_CACHE_VIPT
-               if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
+               if ((set_mask & PVF_MOD) && !(pg->mdpage.pvh_attrs & PVF_NC))
                        pg->mdpage.pvh_attrs |= PVF_DIRTY;
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+               KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
        }
 
@@ -1102,7 +1102,6 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
                pmap_syncicache_page(pg);
                PMAPCOUNT(exec_synced_remap);
        }
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
 
        PMAPCOUNT(remappings);
@@ -1851,7 +1850,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
 #endif
 
        KASSERT(!va || pm);
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 
        /* Already a conflict? */
        if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
@@ -1899,19 +1897,17 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                        }
                        
 #endif
+                       if (ro_mappings > 1
+                           && (pg->mdpage.pvh_attrs & PVF_DIRTY))
+                               pmap_flush_page(pg, false);
+
                        pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
                        pg->mdpage.pvh_attrs |= tst_mask | PVF_COLORED;
-                       /*
-                        * Restore DIRTY bit if page is modified
-                        */
-                       if (pg->mdpage.pvh_attrs & PVF_DMOD)
-                               pg->mdpage.pvh_attrs |= PVF_DIRTY;
                        PMAPCOUNT(vac_color_restore);
                } else {
                        KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
                       KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);
                }
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
        } else if (!va) {
                KASSERT(pmap_is_page_colored_p(pg));
@@ -1919,7 +1915,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                    || (pg->mdpage.pvh_attrs & PVF_DIRTY));
                if (rw_mappings == 0)
                        pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (!pmap_is_page_colored_p(pg)) {
@@ -1930,7 +1925,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                pg->mdpage.pvh_attrs |= PVF_COLORED
                    | (va & arm_cache_prefer_mask)
                    | (rw_mappings > 0 ? PVF_WRITE : 0);
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
@@ -1971,7 +1965,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                                PMAPCOUNT(vac_color_ok);
 
                        /* matching color, just return */
-                       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                        KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                        return;
                }
@@ -1983,7 +1976,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                pmap_flush_page(pg, true);
                pg->mdpage.pvh_attrs &= ~PVF_COLORED;
                pg->mdpage.pvh_attrs |= PVF_NC;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                PMAPCOUNT(vac_color_erase);
        } else if (rw_mappings == 0
                   && (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
@@ -2009,7 +2001,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                } else {
                        PMAPCOUNT(vac_color_blind);
                }
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else {
@@ -2025,16 +2016,12 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                        KASSERT(pv);
                        /*
                         * If there's only one mapped page, change color to the
-                        * page's new color and return.  Restore the DIRTY bit
-                        * that was erased by pmap_flush_page.
+                        * page's new color and return.
                         */
                        if (SLIST_NEXT(pv, pv_link) == NULL) {
                                pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
+                               pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
-                               if (pg->mdpage.pvh_attrs & PVF_DMOD)
-                                       pg->mdpage.pvh_attrs |= PVF_DIRTY;
                                PMAPCOUNT(vac_color_change);
-                               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                                return;
                        }
@@ -2043,7 +2030,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                pg->mdpage.pvh_attrs &= ~PVF_COLORED;
                pg->mdpage.pvh_attrs |= PVF_NC;
                PMAPCOUNT(vac_color_erase);
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
        }
 
   fixup:
@@ -2146,11 +2132,6 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
                va = pv->pv_va;
                pm = pv->pv_pmap;
                oflags = pv->pv_flags;
-               /*
-                * Kernel entries are unmanaged and as such not to be changed.
-                */
-               if (oflags & PVF_KENTRY)
-                       continue;
                pv->pv_flags &= ~maskbits;
 
                pmap_acquire_pmap_lock(pm);
@@ -2294,6 +2275,10 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
        if (need_vac_me_harder) {
                if (pg->mdpage.pvh_attrs & PVF_NC)
                        pmap_vac_me_harder(pg, NULL, 0);
+#if 0 /* XYY */
+               else
+                       pmap_flush_page(pg, false);
+#endif
        }
 #endif
 
@@ -2465,17 +2450,16 @@ pmap_flush_page(struct vm_page *pg, bool flush)
        /*
         * Flush it.
         */
-       if (flush) {
+       if (flush)
                cpu_idcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
-               pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
-       } else {
+       else
                cpu_dcache_wb_range(cdstp + va_offset, PAGE_SIZE);
-               /*
-                * Mark that the page is no longer dirty.
-                */
-               if ((pg->mdpage.pvh_attrs & PVF_DMOD) == 0)
-                       pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
-       }
+
+       /*
+        * Mark that the page is no longer dirty.
+        */
+       if ((pg->mdpage.pvh_attrs & PVF_MOD) == 0)
+               pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
 
        /*
         * Restore the page table entry since we might have interrupted
@@ -2526,6 +2510,9 @@ pmap_page_remove(struct vm_page *pg)
                        PMAPCOUNT(exec_discarded_page_protect);
                pg->mdpage.pvh_attrs &= ~PVF_EXEC;
                KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+#if 0 /* XYY */
+               pmap_flush_page(pg, true);      /* wbinv the contents */
+#endif
 #endif
                simple_unlock(&pg->mdpage.pvh_slock);
                PMAP_HEAD_TO_MAP_UNLOCK();
@@ -2628,6 +2615,16 @@ pmap_page_remove(struct vm_page *pg)
        pg->mdpage.pvh_attrs &= ~PVF_EXEC;
        KASSERT(pg->mdpage.urw_mappings == 0);
        KASSERT(pg->mdpage.uro_mappings == 0);
+#if 0 /* XYY */
+       if ((pg->mdpage.pvh_attrs & PMAP_KMPAGE) == 0) {
+               if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
+                       pmap_flush_page(pg, true);      /* wbinv the contents */
+               } else if ((pg->mdpage.pvh_attrs & PVF_WRITE)
+                          && pg->mdpage.krw_mappings == 0) {
+                       pmap_flush_page(pg, false);     /* wb the contents */
+               }
+       }
+#endif
        if (pg->mdpage.krw_mappings == 0)
                pg->mdpage.pvh_attrs &= ~PVF_WRITE;
        KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
@@ -2979,11 +2976,9 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                        simple_unlock(&pg->mdpage.pvh_slock);
                }
        }
-#if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
-       simple_lock(&pg->mdpage.pvh_slock);
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+#ifdef PMAP_CACHE_VIPT
+       KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
        KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0));
-       simple_unlock(&pg->mdpage.pvh_slock);
 #endif
 
        pmap_release_pmap_lock(pm);
@@ -3808,15 +3803,8 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
                    printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
                    pm, va, VM_PAGE_TO_PHYS(pg)));
 
-               pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
+               pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD | PVF_DIRTY;
                pv->pv_flags |= PVF_REF | PVF_MOD;
-#ifdef PMAP_CACHE_VIPT
-               /*
-                * If there are cacheable mappings for this page, mark it dirty.
-                */
-               if ((pg->mdpage.pvh_attrs & PVF_NC) == 0)
-                       pg->mdpage.pvh_attrs |= PVF_DIRTY;
-#endif
                simple_unlock(&pg->mdpage.pvh_slock);
 
                /* 
diff --git a/sys/arch/arm/include/arm32/pmap.h b/sys/arch/arm/include/arm32/pmap.h
index 9c8c9ef..e6f8cc1 100644
--- a/sys/arch/arm/include/arm32/pmap.h
+++ b/sys/arch/arm/include/arm32/pmap.h
@@ -238,9 +238,6 @@ extern pv_addr_t kernel_l1pt;
 #define        PVF_KENTRY      0x0100          /* page entered via pmap_kenter_pa */
 #define        PVF_KMPAGE      0x0200          /* page is used for kmem */
 #define        PVF_DIRTY       0x0400          /* page may have dirty cache lines */
-#define        PVF_KMOD        0x0800          /* unmanaged page is modified  */
-#define        PVF_KWRITE      (PVF_KENTRY|PVF_WRITE)
-#define        PVF_DMOD        (PVF_MOD|PVF_KMOD|PVF_KMPAGE)
 #define        PVF_NC          (PVF_UNC|PVF_KNC)
 
 /*
-- 
1.5.6.5

From ddc01ca017fdf2c1f593e61bcc89f96376147012 Mon Sep 17 00:00:00 2001
From: Mikko Rapeli <mikko.rapeli%teleca.com@localhost>
Date: Fri, 31 Oct 2008 13:25:44 +0200
Subject: [PATCH] Revert "Change pv_entries to use SLIST."

This reverts commit d794d1a2a0f8d2e8aac026e5527e6ca13455f35c.

Conflicts:

        sys/arch/arm/arm32/pmap.c
---
 sys/arch/arm/arm32/pmap.c            |  327 +++++++++++-----------------------
 sys/arch/arm/include/arm32/vmparam.h |    4 +-
 2 files changed, 106 insertions(+), 225 deletions(-)

diff --git a/sys/arch/arm/arm32/pmap.c b/sys/arch/arm/arm32/pmap.c
index c77ff9f..3ff70f6 100644
--- a/sys/arch/arm/arm32/pmap.c
+++ b/sys/arch/arm/arm32/pmap.c
@@ -304,7 +304,7 @@ static struct pool_cache pmap_l2ptp_cache;
 static vaddr_t pmap_kernel_l2ptp_kva;
 static paddr_t pmap_kernel_l2ptp_phys;
 
-#ifdef PMAPCOUNTERS
+#ifdef PMAPCOUNT
 #define        PMAP_EVCNT_INITIALIZER(name) \
        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
 
@@ -440,7 +440,7 @@ EVCNT_ATTACH_STATIC(pmap_ev_activations);
  */
 static pt_entry_t *csrc_pte, *cdst_pte;
 static vaddr_t csrcp, cdstp;
-vaddr_t memhook;                       /* used by mem.c */
+vaddr_t memhook;
 extern void *msgbufaddr;
 int pmap_kmpages;
 /*
@@ -594,7 +594,7 @@ int pmap_needs_pte_sync;
  * Real definition of pv_entry.
  */
 struct pv_entry {
-       SLIST_ENTRY(pv_entry) pv_link;  /* next pv_entry */
+       struct pv_entry *pv_next;       /* next pv_entry */
        pmap_t          pv_pmap;        /* pmap where mapping lies */
        vaddr_t         pv_va;          /* virtual address for mapping */
        u_int           pv_flags;       /* flags */
@@ -653,7 +653,7 @@ static int          pmap_clean_page(struct pv_entry *, bool);
 #endif
 #ifdef PMAP_CACHE_VIPT
 static void            pmap_syncicache_page(struct vm_page *);
-static void            pmap_flush_page(struct vm_page *, bool);
+static void            pmap_flush_page(struct vm_page *);
 #endif
 static void            pmap_page_remove(struct vm_page *);
 
@@ -852,24 +852,19 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
        pve->pv_flags = flags;
 
        simple_lock(&pg->mdpage.pvh_slock);     /* lock vm_page */
-       pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
+       pvp = &pg->mdpage.pvh_list;
 #ifdef PMAP_CACHE_VIPT
        /*
         * Insert unmapped entries at the head of the pv list.
         */
        if (__predict_true((flags & PVF_KENTRY) == 0)) {
                while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
-                       pvp = &SLIST_NEXT(*pvp, pv_link);
+                       pvp = &(*pvp)->pv_next;
        }
 #endif
-       SLIST_NEXT(pve, pv_link) = *pvp;                /* add to ... */
+       pve->pv_next = *pvp;                    /* add to ... */
        *pvp = pve;                             /* ... locked list */
-#ifdef PMAP_CACHE_VIPT
        pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD | PVF_KENTRY);
-       if ((flags & PVF_MOD) && (pg->mdpage.pvh_attrs & PVF_NC) == 0)
-               pg->mdpage.pvh_attrs |= PVF_DIRTY;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
-#endif
        if (pm == pmap_kernel()) {
                PMAPCOUNT(kernel_mappings);
                if (flags & PVF_WRITE)
@@ -914,7 +909,7 @@ pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
 {
        struct pv_entry *pv;
 
-       SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+       for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
                if (pm == pv->pv_pmap && va == pv->pv_va)
                        break;
        }
@@ -940,7 +935,7 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
        NPDEBUG(PDB_PVDUMP,
            printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va));
 
-       prevptr = &SLIST_FIRST(&pg->mdpage.pvh_list); /* prev pv_entry ptr */
+       prevptr = &pg->mdpage.pvh_list;         /* previous pv_entry pointer */
        pve = *prevptr;
 
        while (pve) {
@@ -958,13 +953,13 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
                         * a KENTRY, if the next one isn't also a KENTER,
                         * clear KENTRY from the page attributes.
                         */
-                       if (SLIST_FIRST(&pg->mdpage.pvh_list) == pve
+                       if (pg->mdpage.pvh_list == pve
                            && (pve->pv_flags & PVF_KENTRY)
-                           && (SLIST_NEXT(pve, pv_link) == NULL
-                           && (SLIST_NEXT(pve, pv_link)->pv_flags & PVF_KENTRY) == 0))
+                           && (pve->pv_next == NULL
+                               || (pve->pv_next->pv_flags & PVF_KENTRY) == 0))
                                pg->mdpage.pvh_attrs &= ~PVF_KENTRY;
 #endif
-                       *prevptr = SLIST_NEXT(pve, pv_link);    /* remove it! */
+                       *prevptr = pve->pv_next;                /* remove it! */
                        if (pm == pmap_kernel()) {
                                PMAPCOUNT(kernel_unmappings);
                                if (pve->pv_flags & PVF_WRITE)
@@ -987,7 +982,7 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
                         * otherwise sync the i-cache for this page.
                         */
                        if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
-                               if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
+                               if (pg->mdpage.pvh_list == NULL) {
                                        pg->mdpage.pvh_attrs &= ~PVF_EXEC;
                                        PMAPCOUNT(exec_discarded_unmap);
                                } else {
@@ -998,24 +993,18 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
 #endif /* PMAP_CACHE_VIPT */
                        break;
                }
-               prevptr = &SLIST_NEXT(pve, pv_link);    /* previous pointer */
-               pve = *prevptr;                         /* advance */
+               prevptr = &pve->pv_next;                /* previous pointer */
+               pve = pve->pv_next;                     /* advance */
        }
 
 #ifdef PMAP_CACHE_VIPT
        /*
         * If this was a writeable page and there are no more writeable
-        * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
-        * the contents to memory.
+        * mappings (ignoring KMPAGE), clear the WRITE flag.
         */
        if ((pg->mdpage.pvh_attrs & PVF_WRITE)
-           && pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0) {
+           && pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0)
                pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-#if 0 /* XYY */
-               if ((pg->mdpage.pvh_attrs & PVF_NC) == 0)
-                       pmap_flush_page(pg, false);
-#endif
-       }
 #endif /* PMAP_CACHE_VIPT */
 
        return(pve);                            /* return removed pve */
@@ -1050,14 +1039,8 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
         * There is at least one VA mapping this page.
         */
 
-       if (clr_mask & (PVF_REF | PVF_MOD)) {
+       if (clr_mask & (PVF_REF | PVF_MOD))
                pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
-#ifdef PMAP_CACHE_VIPT
-               if ((set_mask & PVF_MOD) && !(pg->mdpage.pvh_attrs & PVF_NC))
-                       pg->mdpage.pvh_attrs |= PVF_DIRTY;
-               KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
-#endif
-       }
 
        oflags = npv->pv_flags;
        npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
@@ -1605,7 +1588,7 @@ pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vaddr_t va)
         * kernel-writable pages.
         */
        u_cacheable = 0;
-       SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+       for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
                if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
                        u_cacheable++;
        }
@@ -1626,7 +1609,7 @@ pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vaddr_t va)
                 * might not be set correctly, call pmap_vac_me_user
                 * to recalculate the settings.
                 */
-               SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+               for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
                        /* 
                         * We know kernel mappings will get set
                         * correctly in other calls.  We also know
@@ -1689,7 +1672,7 @@ pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
         * Include kernel mappings as part of our own.
         * Keep a pointer to the first one.
         */
-       SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+       for (pv = npv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
                /* Count mappings in the same pmap */
                if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
                        if (entries++ == 0)
@@ -1721,7 +1704,7 @@ pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
                if (cacheable_entries == 0)
                        return;
 
-               for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
+               for (pv = npv; pv; pv = pv->pv_next) {
                        if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
                            (pv->pv_flags & PVF_NC))
                                continue;
@@ -1763,7 +1746,7 @@ pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
                 * Turn cacheing back on for some pages.  If it is a kernel
                 * page, only do so if there are no other writable pages.
                 */
-               for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
+               for (pv = npv; pv; pv = pv->pv_next) {
                        if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
                            (kpmap != pv->pv_pmap || other_writable)))
                                continue;
@@ -1831,9 +1814,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
        bool bad_alias;
        struct l2_bucket *l2b;
        pt_entry_t *ptep, pte, opte;
-       const u_int
-           rw_mappings = pg->mdpage.urw_mappings + pg->mdpage.krw_mappings,
-           ro_mappings = pg->mdpage.uro_mappings + pg->mdpage.kro_mappings;
 
        /* do we need to do anything? */
        if (arm_cache_prefer_mask == 0)
@@ -1854,177 +1834,130 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
        /* Already a conflict? */
        if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
                /* just an add, things are already non-cached */
-               KASSERT(!(pg->mdpage.pvh_attrs & PVF_DIRTY));
                bad_alias = false;
                if (va) {
                        PMAPCOUNT(vac_color_none);
                        bad_alias = true;
-                       KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+                       KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                        goto fixup;
                }
-               pv = SLIST_FIRST(&pg->mdpage.pvh_list);
+               pv = pg->mdpage.pvh_list;
                /* the list can't be empty because it would be cachable */
                if (pg->mdpage.pvh_attrs & PVF_KMPAGE) {
                        tst_mask = pg->mdpage.pvh_attrs;
                } else {
                        KASSERT(pv);
                        tst_mask = pv->pv_va;
-                       pv = SLIST_NEXT(pv, pv_link);
+                       pv = pv->pv_next;
                }
                /*
                 * Only check for a bad alias if we have writable mappings.
                 */
-               tst_mask &= arm_cache_prefer_mask;
-               if (rw_mappings > 0 && arm_cache_prefer_mask) {
-                       for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
+               if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0) {
+                       tst_mask &= arm_cache_prefer_mask;
+                       for (; pv && !bad_alias; pv = pv->pv_next) {
                                /* if there's a bad alias, stop checking. */
                                if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
                                        bad_alias = true;
                        }
                        pg->mdpage.pvh_attrs |= PVF_WRITE;
-                       if (!bad_alias)
-                               pg->mdpage.pvh_attrs |= PVF_DIRTY;
-               } else {
-                       pg->mdpage.pvh_attrs &= ~PVF_WRITE;
                }
                /* If no conflicting colors, set everything back to cached */
                if (!bad_alias) {
-#ifdef DEBUG
-                       if ((pg->mdpage.pvh_attrs & PVF_WRITE)
-                           || ro_mappings < 2) {
-                               SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
-                                       KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
-                       }
-                       
-#endif
-                       if (ro_mappings > 1
-                           && (pg->mdpage.pvh_attrs & PVF_DIRTY))
-                               pmap_flush_page(pg, false);
-
+                       PMAPCOUNT(vac_color_restore);
                        pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
                        pg->mdpage.pvh_attrs |= tst_mask | PVF_COLORED;
-                       PMAPCOUNT(vac_color_restore);
                } else {
-                       KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
-                       KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);
+                       KASSERT(pg->mdpage.pvh_list != NULL);
+                       KASSERT(pg->mdpage.pvh_list->pv_next != NULL);
                }
-               KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+               KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
        } else if (!va) {
                KASSERT(pmap_is_page_colored_p(pg));
-               KASSERT(!(pg->mdpage.pvh_attrs & PVF_WRITE)
-                   || (pg->mdpage.pvh_attrs & PVF_DIRTY));
-               if (rw_mappings == 0)
+               pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) | arm_cache_prefer_mask;
+               if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
                        pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-               KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+               KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (!pmap_is_page_colored_p(pg)) {
                /* not colored so we just use its color */
-               KASSERT(pg->mdpage.pvh_attrs & (PVF_WRITE|PVF_DIRTY));
                PMAPCOUNT(vac_color_new);
                pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
                pg->mdpage.pvh_attrs |= PVF_COLORED
-                   | (va & arm_cache_prefer_mask)
-                   | (rw_mappings > 0 ? PVF_WRITE : 0);
-               KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+                   | (va & arm_cache_prefer_mask);
+               if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0)
+                       pg->mdpage.pvh_attrs |= PVF_WRITE;
+               KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
                bad_alias = false;
-               if (rw_mappings > 0) {
+               if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0) {
                        /*
                         * We now have writeable mappings and more than one
                         * readonly mapping, verify the colors don't clash
                         * and mark the page as writeable.
                         */
-                       if (ro_mappings > 1
-                           && (pg->mdpage.pvh_attrs & PVF_WRITE) == 0
-                           && arm_cache_prefer_mask) {
+                       if (pg->mdpage.uro_mappings + pg->mdpage.kro_mappings > 1
+                           && (pg->mdpage.pvh_attrs & PVF_WRITE) == 0) {
                                tst_mask = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
-                               SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+                               for (pv = pg->mdpage.pvh_list;
+                                    pv && !bad_alias;
+                                    pv = pv->pv_next) {
                                        /* if there's a bad alias, stop checking. */
-                                       if (((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0) {
+                                       if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
                                                bad_alias = true;
-                                               break;
-                                       }
                                }
                        }
                        pg->mdpage.pvh_attrs |= PVF_WRITE;
                }
                /* If no conflicting colors, set everything back to cached */
                if (!bad_alias) {
-#ifdef DEBUG
-                       if (rw_mappings > 0
-                           || (pg->mdpage.pvh_attrs & PMAP_KMPAGE)) {
-                               tst_mask = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
-                               SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
-                                       KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
-                       }
-#endif
-                       if (SLIST_EMPTY(&pg->mdpage.pvh_list))
+                       if (pg->mdpage.pvh_list)
                                PMAPCOUNT(vac_color_reuse);
                        else
                                PMAPCOUNT(vac_color_ok);
-
                        /* matching color, just return */
-                       KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+                       KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                        return;
                }
-               KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
-               KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);
+               KASSERT(pg->mdpage.pvh_list != NULL);
+               KASSERT(pg->mdpage.pvh_list->pv_next != NULL);
 
                /* color conflict.  evict from cache. */
 
-               pmap_flush_page(pg, true);
+               pmap_flush_page(pg);
                pg->mdpage.pvh_attrs &= ~PVF_COLORED;
                pg->mdpage.pvh_attrs |= PVF_NC;
-               PMAPCOUNT(vac_color_erase);
-       } else if (rw_mappings == 0
+       } else if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0
                   && (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
                KASSERT((pg->mdpage.pvh_attrs & PVF_WRITE) == 0);
-
                /*
-                * If the page has dirty cache lines, clean it.
+                * If all the mappings are read-only, don't do anything.
                 */
-               if (pg->mdpage.pvh_attrs & PVF_DIRTY)
-                       pmap_flush_page(pg, false);
-
-               /*
-                * If this is the first remapping (we know that there are no
-                * writeable mappings), then this is a simple color change.
-                * Otherwise this is a seconary r/o mapping, which means
-                * we don't have to do anything.
-                */
-               if (ro_mappings == 1) {
-                       KASSERT(((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
-                       pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
-                       pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
-                       PMAPCOUNT(vac_color_change);
-               } else {
-                       PMAPCOUNT(vac_color_blind);
-               }
-               KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+               PMAPCOUNT(vac_color_blind);
+               KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else {
-               if (rw_mappings > 0)
+               if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0)
                        pg->mdpage.pvh_attrs |= PVF_WRITE;
 
                /* color conflict.  evict from cache. */
-               pmap_flush_page(pg, true);
+               pmap_flush_page(pg);
 
                /* the list can't be empty because this was a enter/modify */
-               pv = SLIST_FIRST(&pg->mdpage.pvh_list);
-               if ((pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
-                       KASSERT(pv);
-                       /*
-                        * If there's only one mapped page, change color to the
-                        * page's new color and return.
-                        */
-                       if (SLIST_NEXT(pv, pv_link) == NULL) {
-                               pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
-                               pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
-                               PMAPCOUNT(vac_color_change);
-                               KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
-                               return;
-                       }
+               pv = pg->mdpage.pvh_list;
+               KASSERT(pv);
+
+               /*
+                * If there's only one mapped page, change color to the
+                * page's new color and return.
+                */
+               if (pv->pv_next == NULL) {
+                       PMAPCOUNT(vac_color_change);
+                       pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
+                       pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
+                       KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+                       return;
                }
                bad_alias = true;
                pg->mdpage.pvh_attrs &= ~PVF_COLORED;
@@ -2033,12 +1966,12 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
        }
 
   fixup:
-       KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+       KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
 
        /*
         * Turn cacheing on/off for all pages.
         */
-       SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+       for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
                l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
                ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
                opte = *ptep;
@@ -2049,7 +1982,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                        pv->pv_flags &= ~PVF_NC;
                        pte |= pte_l2_s_cache_mode;
                }
-
                if (opte == pte)        /* only update is there's a change */
                        continue;
 
@@ -2086,7 +2018,6 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
        const bool want_syncicache = PV_IS_EXEC_P(pg->mdpage.pvh_attrs);
        bool need_syncicache = false;
        bool did_syncicache = false;
-       bool need_vac_me_harder = false;
 #endif
 
        NPDEBUG(PDB_BITS,
@@ -2109,7 +2040,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
         */
        pg->mdpage.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
 
-       if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
+       if (pg->mdpage.pvh_list == NULL) {
 #ifdef PMAP_CACHE_VIPT
                if (need_syncicache) {
                        /*
@@ -2128,7 +2059,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
        /*
         * Loop over all current mappings setting/clearing as appropos
         */
-       SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+       for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
                va = pv->pv_va;
                pm = pv->pv_pmap;
                oflags = pv->pv_flags;
@@ -2204,16 +2135,15 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
                                        pg->mdpage.pvh_attrs &= ~PVF_WRITE;
                                if (want_syncicache)
                                        need_syncicache = true;
-                               need_vac_me_harder = true;
 #endif
                        }
                }
 
                if (maskbits & PVF_REF) {
+#ifdef PMAP_CACHE_VIVT
                        if ((pv->pv_flags & PVF_NC) == 0 &&
                            (maskbits & (PVF_WRITE|PVF_MOD)) == 0 &&
                            l2pte_valid(npte)) {
-#ifdef PMAP_CACHE_VIVT
                                /*
                                 * Check npte here; we may have already
                                 * done the wbinv above, and the validity
@@ -2229,8 +2159,8 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
                                        pmap_dcache_wb_range(pm,
                                            pv->pv_va, PAGE_SIZE,
                                            true, true);
-#endif
                        }
+#endif
 
                        /*
                         * Make the PTE invalid so that we will take a
@@ -2267,19 +2197,6 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
                pmap_syncicache_page(pg);
                PMAPCOUNT(exec_synced_clearbit);
        }
-       /*
-        * If we are changing this to read-only, we need to call vac_me_harder
-        * so we can change all the read-only pages to cacheable.  We pretend
-        * this as a page deletion.
-        */
-       if (need_vac_me_harder) {
-               if (pg->mdpage.pvh_attrs & PVF_NC)
-                       pmap_vac_me_harder(pg, NULL, 0);
-#if 0 /* XYY */
-               else
-                       pmap_flush_page(pg, false);
-#endif
-       }
 #endif
 
        simple_unlock(&pg->mdpage.pvh_slock);
@@ -2328,7 +2245,7 @@ pmap_clean_page(struct pv_entry *pv, bool is_src)
         */
        pm = curproc->p_vmspace->vm_map.pmap;
 
-       for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) {
+       for (npv = pv; npv; npv = npv->pv_next) {
                if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
                        flags |= npv->pv_flags;
                        /*
@@ -2419,7 +2336,7 @@ pmap_syncicache_page(struct vm_page *pg)
 }
 
 void
-pmap_flush_page(struct vm_page *pg, bool flush)
+pmap_flush_page(struct vm_page *pg)
 {
        const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
        const size_t pte_offset = va_offset >> PGSHIFT;
@@ -2450,16 +2367,7 @@ pmap_flush_page(struct vm_page *pg, bool flush)
        /*
         * Flush it.
         */
-       if (flush)
-               cpu_idcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
-       else
-               cpu_dcache_wb_range(cdstp + va_offset, PAGE_SIZE);
-
-       /*
-        * Mark that the page is no longer dirty.
-        */
-       if ((pg->mdpage.pvh_attrs & PVF_MOD) == 0)
-               pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
+       cpu_idcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
 
        /*
         * Restore the page table entry since we might have interrupted
@@ -2499,7 +2407,7 @@ pmap_page_remove(struct vm_page *pg)
        PMAP_HEAD_TO_MAP_LOCK();
        simple_lock(&pg->mdpage.pvh_slock);
 
-       pv = SLIST_FIRST(&pg->mdpage.pvh_list);
+       pv = pg->mdpage.pvh_list;
        if (pv == NULL) {
 #ifdef PMAP_CACHE_VIPT
                /*
@@ -2510,9 +2418,6 @@ pmap_page_remove(struct vm_page *pg)
                        PMAPCOUNT(exec_discarded_page_protect);
                pg->mdpage.pvh_attrs &= ~PVF_EXEC;
                KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
-#if 0 /* XYY */
-               pmap_flush_page(pg, true);      /* wbinv the contents */
-#endif
 #endif
                simple_unlock(&pg->mdpage.pvh_slock);
                PMAP_HEAD_TO_MAP_UNLOCK();
@@ -2538,10 +2443,10 @@ pmap_page_remove(struct vm_page *pg)
        pmap_clean_page(pv, false);
 #endif
 
-       pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
+       pvp = &pg->mdpage.pvh_list;
        while (pv) {
                pm = pv->pv_pmap;
-               npv = SLIST_NEXT(pv, pv_link);
+               npv = pv->pv_next;
                if (flush == false && (pm == curpm || pm == pmap_kernel()))
                        flush = true;
 
@@ -2554,7 +2459,7 @@ pmap_page_remove(struct vm_page *pg)
                         */
                        if (pv->pv_flags & PVF_KENTRY) {
                                *pvp = pv;
-                               pvp = &SLIST_NEXT(pv, pv_link);
+                               pvp = &pv->pv_next;
                                pv = npv;
                                continue;
                        }
@@ -2601,7 +2506,7 @@ pmap_page_remove(struct vm_page *pg)
                 */
                if (pv == NULL) {
                        *pvp = NULL;
-                       if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
+                       if (pg->mdpage.pvh_list != NULL)
                                pmap_vac_me_harder(pg, pm, 0);
                }
                pmap_release_pmap_lock(pm);
@@ -2613,19 +2518,7 @@ pmap_page_remove(struct vm_page *pg)
        if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
                PMAPCOUNT(exec_discarded_page_protect);
        pg->mdpage.pvh_attrs &= ~PVF_EXEC;
-       KASSERT(pg->mdpage.urw_mappings == 0);
-       KASSERT(pg->mdpage.uro_mappings == 0);
-#if 0 /* XYY */
-       if ((pg->mdpage.pvh_attrs & PMAP_KMPAGE) == 0) {
-               if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
-                       pmap_flush_page(pg, true);      /* wbinv the contents */
-               } else if ((pg->mdpage.pvh_attrs & PVF_WRITE)
-                          && pg->mdpage.krw_mappings == 0) {
-                       pmap_flush_page(pg, false);     /* wb the contents */
-               }
-       }
-#endif
-       if (pg->mdpage.krw_mappings == 0)
+       if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
                pg->mdpage.pvh_attrs &= ~PVF_WRITE;
        KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
 #endif
@@ -2976,10 +2869,6 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                        simple_unlock(&pg->mdpage.pvh_slock);
                }
        }
-#ifdef PMAP_CACHE_VIPT
-       KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
-       KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0));
-#endif
 
        pmap_release_pmap_lock(pm);
        PMAP_MAP_TO_HEAD_UNLOCK();
@@ -3228,7 +3117,7 @@ pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
         */
        if ((pg->mdpage.pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
            && (pv->pv_flags & PVF_WRITE) != 0) {
-               if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
+               if (pg->mdpage.pvh_list == NULL) {
                        pg->mdpage.pvh_attrs &= ~PVF_EXEC;
                        PMAPCOUNT(exec_discarded_kremove);
                } else {
@@ -3316,13 +3205,11 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
                        KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
                        /* if there is a color conflict, evict from cache. */
                        if (pmap_is_page_colored_p(pg)
-                           && ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask)) {
-                               PMAPCOUNT(vac_color_change);
-                               pmap_flush_page(pg, true);
-                       }
+                           && ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask))
+                               pmap_flush_page(pg);
                        pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
                        pg->mdpage.pvh_attrs |= PVF_KMPAGE
-                           | PVF_COLORED | PVF_DIRTY
+                           | PVF_COLORED
                            | (va & arm_cache_prefer_mask);
 #endif
 #ifdef PMAP_CACHE_VIVT
@@ -3338,11 +3225,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
                        }
                        pmap_enter_pv(pg, pv, pmap_kernel(), va,
                            PVF_WIRED | PVF_KENTRY
-                           | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
-                       if ((prot & VM_PROT_WRITE)
-                           && !(pg->mdpage.pvh_attrs & PVF_NC))
-                               pg->mdpage.pvh_attrs |= PVF_DIRTY;
-                       KASSERT((prot & VM_PROT_WRITE) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+                               | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
                        simple_lock(&pg->mdpage.pvh_slock);
                        pmap_vac_me_harder(pg, pmap_kernel(), va);
                        simple_unlock(&pg->mdpage.pvh_slock);
@@ -3803,7 +3686,7 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
                    printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
                    pm, va, VM_PAGE_TO_PHYS(pg)));
 
-               pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD | PVF_DIRTY;
+               pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
                pv->pv_flags |= PVF_REF | PVF_MOD;
                simple_unlock(&pg->mdpage.pvh_slock);
 
@@ -4073,7 +3956,7 @@ pmap_activate(struct lwp *l)
        }
 
        /* No interrupts while we frob the TTB/DACR */
-       oldirqstate = disable_interrupts(IF32_bits);
+       oldirqstate = disable_interrupts(I32_bit | F32_bit);
 
        /*
         * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
@@ -4325,7 +4208,7 @@ pmap_zero_page_generic(paddr_t phys)
        pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
 
 #ifdef DEBUG
-       if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
+       if (pg->mdpage.pvh_list != NULL)
                panic("pmap_zero_page: page has mappings");
 #endif
 
@@ -4363,7 +4246,6 @@ pmap_zero_page_generic(paddr_t phys)
                pg->mdpage.pvh_attrs &= ~PVF_EXEC;
                PMAPCOUNT(exec_discarded_zero);
        }
-       pg->mdpage.pvh_attrs |= PVF_DIRTY;
 #endif
 }
 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
@@ -4375,7 +4257,7 @@ pmap_zero_page_xscale(paddr_t phys)
 #ifdef DEBUG
        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
 
-       if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
+       if (pg->mdpage.pvh_list != NULL)
                panic("pmap_zero_page: page has mappings");
 #endif
 
@@ -4421,7 +4303,7 @@ pmap_pageidlezero(paddr_t phys)
 
 
 #ifdef DEBUG
-       if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
+       if (pg->mdpage.pvh_list != NULL)
                panic("pmap_pageidlezero: page has mappings");
 #endif
 
@@ -4509,7 +4391,7 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
        pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];
 
 #ifdef DEBUG
-       if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list))
+       if (dst_pg->mdpage.pvh_list != NULL)
                panic("pmap_copy_page: dst page has mappings");
 #endif
 
@@ -4526,7 +4408,7 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
         */
        simple_lock(&src_pg->mdpage.pvh_slock);
 #ifdef PMAP_CACHE_VIVT
-       (void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true);
+       (void) pmap_clean_page(src_pg->mdpage.pvh_list, true);
 #endif
 
        /*
@@ -4580,7 +4462,6 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
                dst_pg->mdpage.pvh_attrs &= ~PVF_EXEC;
                PMAPCOUNT(exec_discarded_copy);
        }
-       dst_pg->mdpage.pvh_attrs |= PVF_DIRTY;
 #endif
 }
 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
@@ -4593,7 +4474,7 @@ pmap_copy_page_xscale(paddr_t src, paddr_t dst)
 #ifdef DEBUG
        struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
 
-       if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list))
+       if (dst_pg->mdpage.pvh_list != NULL)
                panic("pmap_copy_page: dst page has mappings");
 #endif
 
@@ -4607,7 +4488,7 @@ pmap_copy_page_xscale(paddr_t src, paddr_t dst)
         */
        simple_lock(&src_pg->mdpage.pvh_slock);
 #ifdef PMAP_CACHE_VIVT
-       (void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true);
+       (void) pmap_clean_page(src_pg->mdpage.pvh_list, true);
 #endif
 
        /*
@@ -4683,7 +4564,7 @@ pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
                 * This new page must not have any mappings.  Enter it via
                 * pmap_kenter_pa and let that routine do the hard work.
                 */
-               KASSERT(SLIST_EMPTY(&pg->mdpage.pvh_list));
+               KASSERT(pg->mdpage.pvh_list == NULL);
                pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
 #endif
        }
@@ -5114,7 +4995,7 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
        pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte);
        pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
        pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte);
-       pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
+       pmap_alloc_specials(&virtual_avail, 1, (void *)&memhook, NULL);
        pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
            (void *)&msgbufaddr, NULL);
 
@@ -6286,7 +6167,7 @@ pmap_dump_ncpg(pmap_t pm)
                    pg->mdpage.krw_mappings, pg->mdpage.kro_mappings,
                    pg->mdpage.urw_mappings, pg->mdpage.uro_mappings);
 
-               SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+               for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
                        printf("   %c va 0x%08lx, flags 0x%x\n",
                            (pm == pv->pv_pmap) ? '*' : ' ',
                            pv->pv_va, pv->pv_flags);
diff --git a/sys/arch/arm/include/arm32/vmparam.h b/sys/arch/arm/include/arm32/vmparam.h
index 892fca3..ba9644c 100644
--- a/sys/arch/arm/include/arm32/vmparam.h
+++ b/sys/arch/arm/include/arm32/vmparam.h
@@ -109,7 +109,7 @@ extern vaddr_t virtual_end;
  */
 #define        __HAVE_VM_PAGE_MD
 struct vm_page_md {
-       SLIST_HEAD(,pv_entry) pvh_list;         /* pv_entry list */
+       struct pv_entry *pvh_list;              /* pv_entry list */
        struct simplelock pvh_slock;            /* lock on this head */
        int pvh_attrs;                          /* page attributes */
        u_int uro_mappings;
@@ -137,7 +137,7 @@ struct vm_page_md {
 
 #define        VM_MDPAGE_INIT(pg)                                              \
 do {                                                                   \
-       SLIST_INIT(&(pg)->mdpage.pvh_list);                             \
+       (pg)->mdpage.pvh_list = NULL;                                   \
        simple_lock_init(&(pg)->mdpage.pvh_slock);                      \
        VM_MDPAGE_PVH_ATTRS_INIT(pg);                                   \
        (pg)->mdpage.uro_mappings = 0;                                  \
-- 
1.5.6.5


