Port-arm archive


[PATCH, repost] ARM: VIPT cache coherency fix



ARM: cache coherency fix for platforms with VIPT cache

Dirty cache lines are not taken into account when checking for
possible aliases. Track these cache lines and flush the page from
the cache when a mapping with a clashing cache color is encountered.

Patch originally from Imre Deak <imre.deak%teleca.com@localhost>,
http://mail-index.netbsd.org/port-arm/2008/08/06/msg000324.html

Fixes the link failures and object file corruption reported in
http://mail-index.netbsd.org/port-arm/2008/11/26/msg000464.html

Removed some asserts since they no longer hold.
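
For readers less familiar with VIPT aliasing, the sketch below illustrates
the idea behind the change: remember whether a page may still have dirty
lines in the virtually indexed cache, and write them back before a mapping
with a clashing cache color is used. This is a simplified illustration
only, not the NetBSD pmap code; the names (struct page_md, PG_DIRTY,
PG_COLORED, cache_color_mask, check_alias, flush_page) are invented for
the example.

/*
 * Simplified model of dirty-line tracking for a VIPT cache.  The real
 * logic lives in pmap_vac_me_harder() and pmap_flush_page() in
 * sys/arch/arm/arm32/pmap.c and also distinguishes read-only from
 * read/write mappings, kernel pages, and the uncached (PVF_NC) case.
 */
#include <stdbool.h>
#include <stdint.h>

#define PG_DIRTY        0x1     /* page may have dirty cache lines */
#define PG_COLORED      0x2     /* page currently owns a cache color */

struct page_md {
        uint32_t attrs;         /* PG_* flags plus the current color bits */
};

/* Example mask: two "color" bits above the 4 KB page offset. */
static const uint32_t cache_color_mask = 0x3000;

/*
 * Called when a new virtual mapping of the page at 'va' is added.
 * flush_page() stands in for a write-back-and-invalidate of every
 * cache line belonging to the page.
 */
static void
check_alias(struct page_md *md, uintptr_t va, bool writable,
    void (*flush_page)(struct page_md *))
{
        uint32_t new_color = va & cache_color_mask;

        if ((md->attrs & PG_COLORED) == 0) {
                /* First mapping: adopt its color; writes will dirty it. */
                md->attrs = PG_COLORED | new_color |
                    (writable ? PG_DIRTY : 0);
                return;
        }

        if ((md->attrs & cache_color_mask) == new_color) {
                /* Same color: no alias.  A writable mapping dirties it. */
                if (writable)
                        md->attrs |= PG_DIRTY;
                return;
        }

        /*
         * Clashing color.  If the page may still hold dirty lines they
         * must be cleaned first, otherwise accesses through the new
         * color can see stale data (the corruption this patch fixes).
         */
        if (md->attrs & PG_DIRTY) {
                (*flush_page)(md);
                md->attrs &= ~PG_DIRTY;
        }
        md->attrs = (md->attrs & ~cache_color_mask) | new_color |
            (writable ? PG_DIRTY : 0);
}
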
---
 sys/arch/arm/arm32/pmap.c |   33 +++++++++++++++------------------
 1 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/sys/arch/arm/arm32/pmap.c b/sys/arch/arm/arm32/pmap.c
index e9f5fd6..64bbf8a 100644
--- a/sys/arch/arm/arm32/pmap.c
+++ b/sys/arch/arm/arm32/pmap.c
@@ -877,7 +877,6 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
                pg->mdpage.pvh_attrs |= PVF_KMOD;
        if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
                pg->mdpage.pvh_attrs |= PVF_DIRTY;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
        if (pm == pmap_kernel()) {
                PMAPCOUNT(kernel_mappings);
@@ -892,6 +891,7 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
                pg->mdpage.uro_mappings++;
 
 #ifdef PMAP_CACHE_VIPT
+       pmap_vac_me_harder(pg, pm, va);
        /*
         * If this is an exec mapping and its the first exec mapping
         * for this page, make sure to sync the I-cache.
@@ -1015,7 +1015,6 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
         */
        if (pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0)
                pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif /* PMAP_CACHE_VIPT */
 
        return(pve);                            /* return removed pve */
@@ -1058,7 +1057,6 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
 #ifdef PMAP_CACHE_VIPT
                if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
                        pg->mdpage.pvh_attrs |= PVF_DIRTY;
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
        }
 
@@ -1105,7 +1103,6 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
                pmap_syncicache_page(pg);
                PMAPCOUNT(exec_synced_remap);
        }
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 #endif
 
        PMAPCOUNT(remappings);
@@ -1855,7 +1852,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
 #endif
 
        KASSERT(!va || pm);
-       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 
        /* Already a conflict? */
        if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
@@ -1915,7 +1911,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                        KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
                        KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);
                }
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
        } else if (!va) {
                KASSERT(pmap_is_page_colored_p(pg));
@@ -1923,7 +1918,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                    || (pg->mdpage.pvh_attrs & PVF_DIRTY));
                if (rw_mappings == 0)
                        pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (!pmap_is_page_colored_p(pg)) {
@@ -1933,8 +1927,7 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
                pg->mdpage.pvh_attrs |= PVF_COLORED
                    | (va & arm_cache_prefer_mask)
-                   | (rw_mappings > 0 ? PVF_WRITE : 0);
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
+                   | (rw_mappings > 0 ? (PVF_WRITE | PVF_DIRTY) : 0);
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
@@ -1958,6 +1951,8 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                                }
                        }
                        pg->mdpage.pvh_attrs |= PVF_WRITE;
+                       if (!bad_alias)
+                               pg->mdpage.pvh_attrs |= PVF_DIRTY;
                }
                /* If no conflicting colors, set everything back to cached */
                if (!bad_alias) {
@@ -1975,7 +1970,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                                PMAPCOUNT(vac_color_ok);
 
                        /* matching color, just return */
-                       KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                        KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                        return;
                }
@@ -1985,9 +1979,8 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                /* color conflict.  evict from cache. */
 
                pmap_flush_page(pg, true);
-               pg->mdpage.pvh_attrs &= ~PVF_COLORED;
+               pg->mdpage.pvh_attrs &= ~(PVF_COLORED | PVF_DIRTY);
                pg->mdpage.pvh_attrs |= PVF_NC;
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                PMAPCOUNT(vac_color_erase);
        } else if (rw_mappings == 0
                   && (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
@@ -1996,8 +1989,15 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                /*
                 * If the page has dirty cache lines, clean it.
                 */
-               if (pg->mdpage.pvh_attrs & PVF_DIRTY)
+               if (pg->mdpage.pvh_attrs & PVF_DIRTY) {
+                       /*
+                        * Flush the page b/c there might be dirty
+                        * cache lines.
+                        */
                        pmap_flush_page(pg, false);
+                       pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_DIRTY;
+                       pg->mdpage.pvh_attrs |= va & arm_cache_prefer_mask;
+               }
 
                /*
                 * If this is the first remapping (we know that there are no
@@ -2006,14 +2006,12 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                 * we don't have to do anything.
                 */
                if (ro_mappings == 1) {
-                       KASSERT(((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
                        pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
                        pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
                        PMAPCOUNT(vac_color_change);
                } else {
                        PMAPCOUNT(vac_color_blind);
                }
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else {
@@ -2022,6 +2020,7 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
 
                /* color conflict.  evict from cache. */
                pmap_flush_page(pg, true);
+               pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
 
                /* the list can't be empty because this was a enter/modify */
                pv = SLIST_FIRST(&pg->mdpage.pvh_list);
@@ -2038,7 +2037,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                                if (pg->mdpage.pvh_attrs & PVF_DMOD)
                                        pg->mdpage.pvh_attrs |= PVF_DIRTY;
                                PMAPCOUNT(vac_color_change);
-                               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                                KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                                return;
                        }
@@ -2047,7 +2045,6 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                pg->mdpage.pvh_attrs &= ~PVF_COLORED;
                pg->mdpage.pvh_attrs |= PVF_NC;
                PMAPCOUNT(vac_color_erase);
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
        }
 
   fixup:
@@ -2809,6 +2806,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                        oflags = pmap_modify_pv(pg, pm, va,
                            PVF_WRITE | PVF_EXEC | PVF_WIRED |
                            PVF_MOD | PVF_REF, nflags);
+                       pmap_vac_me_harder(pg, pm, va);
                        simple_unlock(&pg->mdpage.pvh_slock);
 
 #ifdef PMAP_CACHE_VIVT
@@ -2986,7 +2984,6 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
        if (pg) {
                simple_lock(&pg->mdpage.pvh_slock);
-               KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
                KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0));
                simple_unlock(&pg->mdpage.pvh_slock);
        }


 