Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/arm Add l2pte_set and l2pte_reset inlines to set/re...



details:   https://anonhg.NetBSD.org/src/rev/6df9a1a3fa78
branches:  trunk
changeset: 787799:6df9a1a3fa78
user:      matt <matt%NetBSD.org@localhost>
date:      Wed Jul 03 21:37:35 2013 +0000

description:
Add l2pte_set and l2pte_reset inlines to set/reset a pte.  These will be
used to support > 4KB page sizes.
Don't use >> L1_S_SHIFT, use L1_IDX() instead.

diffstat:

 sys/arch/arm/arm32/pmap.c         |  333 ++++++++++++++++++-------------------
 sys/arch/arm/include/arm32/pmap.h |   23 ++-
 2 files changed, 188 insertions(+), 168 deletions(-)

diffs (truncated from 886 to 300 lines):

diff -r 99f65f8c27d7 -r 6df9a1a3fa78 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Wed Jul 03 21:20:45 2013 +0000
+++ b/sys/arch/arm/arm32/pmap.c Wed Jul 03 21:37:35 2013 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.261 2013/07/03 15:30:24 matt Exp $  */
+/*     $NetBSD: pmap.c,v 1.262 2013/07/03 21:37:35 matt Exp $  */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
 #include <arm/cpuconf.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.261 2013/07/03 15:30:24 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.262 2013/07/03 21:37:35 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
@@ -1496,8 +1496,6 @@
 pmap_l2ptp_ctor(void *arg, void *v, int flags)
 {
 #ifndef PMAP_INCLUDE_PTE_SYNC
-       struct l2_bucket *l2b;
-       pt_entry_t *ptep, pte;
        vaddr_t va = (vaddr_t)v & ~PGOFSET;
 
        /*
@@ -1508,16 +1506,18 @@
         * page tables, we simply fix up the cache-mode here if it's not
         * correct.
         */
-       l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+       struct l2_bucket * const l2b = pmap_get_l2_bucket(pmap_kernel(), va);
        KDASSERT(l2b != NULL);
-       ptep = &l2b->l2b_kva[l2pte_index(va)];
-       pte = *ptep;
-
-       if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
+       pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)];
+       pt_entry_t opte = *ptep;
+
+       if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
                /*
                 * Page tables must have the cache-mode set to Write-Thru.
                 */
-               *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
+               const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
+                   | pte_l2_s_cache_mode_pt;
+               l2pte_set(ptep, npte, opte);
                PTE_SYNC(ptep);
                cpu_tlb_flushD_SE(va);
                cpu_cpwait();
@@ -1556,7 +1556,8 @@
                 * Map the vector page.
                 */
                pmap_enter(pm, vector_page, systempage.pv_pa,
-                   VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
+                   VM_PROT_READ | VM_PROT_EXECUTE,
+                   VM_PROT_READ | VM_PROT_EXECUTE | PMAP_WIRED);
                pmap_update(pm);
 
                pm->pm_pl1vec = pmap_l1_kva(pm) + L1_IDX(vector_page);
@@ -1730,8 +1731,6 @@
 {
        pmap_t kpmap = pmap_kernel();
        struct pv_entry *pv, *npv = NULL;
-       struct l2_bucket *l2b;
-       pt_entry_t *ptep, pte;
        u_int entries = 0;
        u_int writable = 0;
        u_int cacheable_entries = 0;
@@ -1783,13 +1782,16 @@
 
                        pv->pv_flags |= PVF_NC;
 
-                       l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+                       struct l2_bucket * const l2b
+                           = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
                        KDASSERT(l2b != NULL);
-                       ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-                       pte = *ptep & ~L2_S_CACHE_MASK;
+                       pt_entry_t * const ptep
+                           = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+                       const pt_entry_t opte = *ptep;
+                       pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
 
                        if ((va != pv->pv_va || pm != pv->pv_pmap)
-                           && l2pte_valid(pte)) {
+                           && l2pte_valid(npte)) {
 #ifdef PMAP_CACHE_VIVT
                                pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va,
                                    true, pv->pv_flags);
@@ -1798,7 +1800,7 @@
                                    pv->pv_flags);
                        }
 
-                       *ptep = pte;
+                       l2pte_set(ptep, npte, opte);
                        PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
                }
                cpu_cpwait();
@@ -1815,17 +1817,21 @@
 
                        pv->pv_flags &= ~PVF_NC;
 
-                       l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+                       struct l2_bucket * const l2b
+                           = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
                        KDASSERT(l2b != NULL);
-                       ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-                       pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
-
-                       if (l2pte_valid(pte)) {
+                       pt_entry_t * const ptep
+                           = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+                       const pt_entry_t opte = *ptep;
+                       pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
+                           | pte_l2_s_cache_mode;
+
+                       if (l2pte_valid(opte)) {
                                pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va,
                                    pv->pv_flags);
                        }
 
-                       *ptep = pte;
+                       l2pte_set(ptep, npte, opte);
                        PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
                }
        }
@@ -1839,8 +1845,6 @@
        struct pv_entry *pv;
        vaddr_t tst_mask;
        bool bad_alias;
-       struct l2_bucket *l2b;
-       pt_entry_t *ptep, pte, opte;
        const u_int
            rw_mappings = md->urw_mappings + md->krw_mappings,
            ro_mappings = md->uro_mappings + md->kro_mappings;
@@ -2098,27 +2102,27 @@
         * Turn cacheing on/off for all pages.
         */
        SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
-               l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+               struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap,
+                   pv->pv_va);
                KDASSERT(l2b != NULL);
-               ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
-               opte = *ptep;
-               pte = opte & ~L2_S_CACHE_MASK;
+               pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+               const pt_entry_t opte = *ptep;
+               pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
                if (bad_alias) {
                        pv->pv_flags |= PVF_NC;
                } else {
                        pv->pv_flags &= ~PVF_NC;
-                       pte |= pte_l2_s_cache_mode;
+                       npte |= pte_l2_s_cache_mode;
                }
 
-               if (opte == pte)        /* only update is there's a change */
+               if (opte == npte)       /* only update if there's a change */
                        continue;
 
-               if (l2pte_valid(pte)) {
-                       pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va,
-                           pv->pv_flags);
+               if (l2pte_valid(npte)) {
+                       pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags);
                }
 
-               *ptep = pte;
+               l2pte_set(ptep, npte, opte);
                PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
        }
 }
@@ -2133,17 +2137,14 @@
 static void
 pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
 {
-       struct l2_bucket *l2b;
        struct pv_entry *pv;
-       pt_entry_t *ptep, npte, opte;
        pmap_t pm;
        vaddr_t va;
        u_int oflags;
 #ifdef PMAP_CACHE_VIPT
        const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
+       bool need_vac_me_harder = false;
        bool need_syncicache = false;
-       bool did_syncicache = false;
-       bool need_vac_me_harder = false;
 #endif
 
        NPDEBUG(PDB_BITS,
@@ -2155,8 +2156,9 @@
         * If we might want to sync the I-cache and we've modified it,
         * then we know we definitely need to sync or discard it.
         */
-       if (want_syncicache)
+       if (want_syncicache) {
                need_syncicache = md->pvh_attrs & PVF_MOD;
+       }
 #endif
        /*
         * Clear saved attributes (modify, reference)
@@ -2164,7 +2166,7 @@
        md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
 
        if (SLIST_EMPTY(&md->pvh_list)) {
-#ifdef PMAP_CACHE_VIPT
+#if defined(PMAP_CACHE_VIPT)
                if (need_syncicache) {
                        /*
                         * No one has it mapped, so just discard it.  The next
@@ -2193,11 +2195,12 @@
 
                pmap_acquire_pmap_lock(pm);
 
-               l2b = pmap_get_l2_bucket(pm, va);
+               struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va);
                KDASSERT(l2b != NULL);
 
-               ptep = &l2b->l2b_kva[l2pte_index(va)];
-               npte = opte = *ptep;
+               pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
+               const pt_entry_t opte = *ptep;
+               pt_entry_t npte = opte;
 
                NPDEBUG(PDB_BITS,
                    printf(
@@ -2292,8 +2295,9 @@
                }
 
                if (npte != opte) {
-                       *ptep = npte;
+                       l2pte_set(ptep, npte, opte);
                        PTE_SYNC(ptep);
+
                        /* Flush the TLB entry if a current pmap. */
                        pmap_tlb_flush_SE(pm, pv->pv_va, oflags);
                }
@@ -2309,10 +2313,11 @@
        /*
         * If we need to sync the I-cache and we haven't done it yet, do it.
         */
-       if (need_syncicache && !did_syncicache) {
+       if (need_syncicache) {
                pmap_syncicache_page(md, pa);
                PMAPCOUNT(exec_synced_clearbit);
        }
+
        /*
         * If we are changing this to read-only, we need to call vac_me_harder
         * so we can change all the read-only pages to cacheable.  We pretend
@@ -2430,10 +2435,11 @@
        /*
         * Set up a PTE with the right coloring to flush existing cache lines.
         */
-       *ptep = L2_S_PROTO |
+       const pt_entry_t npte = L2_S_PROTO |
            pa
            | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
            | pte_l2_s_cache_mode;
+       l2pte_set(ptep, npte, 0);
        PTE_SYNC(ptep);
 
        /*
@@ -2443,7 +2449,7 @@
        /*
         * Unmap the page.
         */
-       *ptep = 0;
+       l2pte_reset(ptep);
        PTE_SYNC(ptep);
        pmap_tlb_flush_SE(pmap_kernel(), cdstp + va_offset, PVF_REF | PVF_EXEC);
 
@@ -2510,7 +2516,7 @@
        for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
                const size_t pte_offset = va_offset >> PGSHIFT;
                pt_entry_t * const ptep = &cdst_pte[pte_offset];
-               const pt_entry_t oldpte = *ptep;
+               const pt_entry_t opte = *ptep;
 
                if (flush == PMAP_FLUSH_SECONDARY
                    && va_offset == (md->pvh_attrs & arm_cache_prefer_mask))
@@ -2522,14 +2528,16 @@
                 * Set up a PTE with the right coloring to flush
                 * existing cache entries.
                 */
-               *ptep = L2_S_PROTO
+               const pt_entry_t npte = L2_S_PROTO
                    | pa
                    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
                    | pte_l2_s_cache_mode;
+               l2pte_set(ptep, npte, opte);
                PTE_SYNC(ptep);
 
                /*
-                * Flush it.



Home | Main Index | Thread Index | Old Index