
[PATCH] ARM: VIPT cache coherency fix



Page loaning (at least) is currently broken on ARM VIPT platforms. The
reason is that potentially dirty cache lines are not taken into account
when a page's mappings change; the patch below fixes this.

The fix has been validated against the socket page loaning breakage
reported earlier on the list.
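
For readers not familiar with the failure mode: on a VIPT cache, the
same physical page mapped at two virtual addresses of different cache
color lands in different cache lines, so dirty lines written through
one color are invisible through the other until they are written back.
Below is a minimal user-space model of the bookkeeping the patch adds;
page_md, flush_page() and COLOR_MASK are made-up stand-ins for the real
pg->mdpage, pmap_flush_page() and arm_cache_prefer_mask, so treat it as
a sketch rather than the kernel code:

#include <stdio.h>
#include <stdint.h>

#define PVF_WRITE  0x01u
#define PVF_DIRTY  0x02u    /* cache may hold dirty lines for the page */
#define COLOR_MASK 0x3000u  /* stand-in for arm_cache_prefer_mask */

struct page_md {            /* stand-in for pg->mdpage */
	uint32_t attrs;     /* cache color bits | PVF_* flags */
	unsigned rw_mappings; /* cf. urw_mappings + krw_mappings */
};

/* stand-in for pmap_flush_page(): write back + invalidate the page */
static void flush_page(struct page_md *md)
{
	printf("flushing page, dirty=%d\n", (md->attrs & PVF_DIRTY) != 0);
	md->attrs &= ~PVF_DIRTY;
}

/* a cacheable writable mapping is entered: lines may become dirty */
static void enter_rw_mapping(struct page_md *md, uint32_t va)
{
	md->rw_mappings++;
	md->attrs = (md->attrs & ~COLOR_MASK) | (va & COLOR_MASK);
	md->attrs |= PVF_WRITE | PVF_DIRTY;
}

/*
 * A read-only mapping at a different color arrives after the last
 * writable mapping is gone.  Before the fix this path did nothing
 * ("vac_color_blind"), so a reader at the new color could see stale
 * data; with the fix the page is flushed first if it may be dirty.
 */
static void recolor_read_only(struct page_md *md, uint32_t va)
{
	if (md->attrs & PVF_DIRTY)
		flush_page(md);
	md->attrs = (md->attrs & ~COLOR_MASK) | (va & COLOR_MASK);
}

int main(void)
{
	struct page_md md = { 0, 0 };

	enter_rw_mapping(&md, 0x0000);   /* writer dirties the cache */
	md.rw_mappings--;                /* writable mapping removed... */
	md.attrs &= ~PVF_WRITE;          /* ...but dirty lines may remain */
	recolor_read_only(&md, 0x1000);  /* page loaned out read-only */
	return 0;
}

The recolor_read_only() path is the interesting one: the previously
"blind" read-only case matches the loaning breakage described above.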

--Imre

diff --git a/sys/arch/arm/arm32/pmap.c b/sys/arch/arm/arm32/pmap.c
index 2b62477..3ed914d 100644
--- a/sys/arch/arm/arm32/pmap.c
+++ b/sys/arch/arm/arm32/pmap.c
@@ -877,6 +877,7 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
                pg->mdpage.uro_mappings++;
 
 #ifdef PMAP_CACHE_VIPT
+       pmap_vac_me_harder(pg, pm, va);
        /*
         * If this is an exec mapping and its the first exec mapping
         * for this page, make sure to sync the I-cache.
@@ -1833,6 +1834,7 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
        /* Already a conflict? */
        if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
                /* just an add, things are already non-cached */
+               KASSERT(!(pg->mdpage.pvh_attrs & PVF_DIRTY));
                bad_alias = false;
                if (va) {
                        PMAPCOUNT(vac_color_none);
@@ -1849,17 +1851,21 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                        tst_mask = pv->pv_va;
                        pv = pv->pv_next;
                }
+
+               tst_mask &= arm_cache_prefer_mask;
+
                /*
                 * Only check for a bad alias if we have writable mappings.
                 */
                if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0) {
-                       tst_mask &= arm_cache_prefer_mask;
                        for (; pv && !bad_alias; pv = pv->pv_next) {
                                /* if there's a bad alias, stop checking. */
                                if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
                                        bad_alias = true;
                        }
                        pg->mdpage.pvh_attrs |= PVF_WRITE;
+                       if (!bad_alias)
+                               pg->mdpage.pvh_attrs |= PVF_DIRTY;
                }
                /* If no conflicting colors, set everything back to cached */
                if (!bad_alias) {
@@ -1873,19 +1879,21 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
        KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
        } else if (!va) {
                KASSERT(pmap_is_page_colored_p(pg));
-               pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) | arm_cache_prefer_mask;
+               KASSERT(!(pg->mdpage.pvh_attrs & PVF_WRITE) ||
+                        (pg->mdpage.pvh_attrs & PVF_DIRTY));
                if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
                        pg->mdpage.pvh_attrs &= ~PVF_WRITE;
                KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (!pmap_is_page_colored_p(pg)) {
+               KASSERT(!(pg->mdpage.pvh_attrs & (PVF_WRITE | PVF_DIRTY)));
                /* not colored so we just use its color */
                PMAPCOUNT(vac_color_new);
                pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
                pg->mdpage.pvh_attrs |= PVF_COLORED
                    | (va & arm_cache_prefer_mask);
                if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0)
-                       pg->mdpage.pvh_attrs |= PVF_WRITE;
+                       pg->mdpage.pvh_attrs |= PVF_WRITE | PVF_DIRTY;
                KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
        } else if (((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
@@ -1908,6 +1916,8 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                                }
                        }
                        pg->mdpage.pvh_attrs |= PVF_WRITE;
+                       if (!bad_alias)
+                               pg->mdpage.pvh_attrs |= PVF_DIRTY;
                }
                /* If no conflicting colors, set everything back to cached */
                if (!bad_alias) {
@@ -1925,14 +1935,20 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
                /* color conflict.  evict from cache. */
 
                pmap_flush_page(pg);
-               pg->mdpage.pvh_attrs &= ~PVF_COLORED;
+               pg->mdpage.pvh_attrs &= ~(PVF_COLORED | PVF_DIRTY);
                pg->mdpage.pvh_attrs |= PVF_NC;
        } else if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0
                   && (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
                KASSERT((pg->mdpage.pvh_attrs & PVF_WRITE) == 0);
-               /*
-                * If all the mappings are read-only, don't do anything.
-                */
+               if (pg->mdpage.pvh_attrs & PVF_DIRTY) {
+                       /*
+                        * Flush the page b/c there might be dirty
+                        * cache lines.
+                        */
+                       pmap_flush_page(pg);
+                       pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_DIRTY;
+                       pg->mdpage.pvh_attrs |= va & arm_cache_prefer_mask;
+               }
                PMAPCOUNT(vac_color_blind);
                KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
                return;
@@ -1942,6 +1958,7 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
 
                /* color conflict.  evict from cache. */
                pmap_flush_page(pg);
+               pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
 
                /* the list can't be empty because this was a enter/modify */
                pv = pg->mdpage.pvh_list;
@@ -2694,6 +2711,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                        oflags = pmap_modify_pv(pg, pm, va,
                            PVF_WRITE | PVF_EXEC | PVF_WIRED |
                            PVF_MOD | PVF_REF, nflags);
+                       pmap_vac_me_harder(pg, pm, va);
                        simple_unlock(&pg->mdpage.pvh_slock);
 
 #ifdef PMAP_CACHE_VIVT
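
For reference, my reading of the PVF_DIRTY lifecycle the hunks above
implement, condensed into a tiny self-contained sketch (the flag values
are made up; only the transitions matter): the flag is set whenever a
cacheable writable mapping exists, and cleared only after
pmap_flush_page() has run, i.e. on a re-coloring, on the transition to
PVF_NC, and in the formerly blind read-only path.

#include <assert.h>
#include <stdint.h>

#define PVF_NC    0x04u   /* page mapped non-cacheable */
#define PVF_DIRTY 0x02u   /* cache may hold dirty lines */

/* dirty lines are written back before the flag is dropped */
static uint32_t flush_and_clear(uint32_t attrs)
{
	/* pmap_flush_page() would run here */
	return attrs & ~PVF_DIRTY;
}

int main(void)
{
	uint32_t attrs = 0;

	attrs |= PVF_DIRTY;              /* cacheable writable mapping entered */
	attrs = flush_and_clear(attrs);  /* color conflict: flush, recolor */

	attrs |= PVF_DIRTY;              /* another writer comes along */
	attrs = flush_and_clear(attrs);  /* unresolvable conflict: flush... */
	attrs |= PVF_NC;                 /* ...then map non-cacheable */

	/* the invariant the new KASSERT checks: NC pages are never dirty */
	assert(!(attrs & PVF_NC) || !(attrs & PVF_DIRTY));
	return 0;
}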


