Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/arm/arm32 Replace most uses of pmap_pde_p with pmap...



details:   https://anonhg.NetBSD.org/src/rev/d60e6b5b1c89
branches:  trunk
changeset: 516954:d60e6b5b1c89
user:      rearnsha <rearnsha%NetBSD.org@localhost>
date:      Sat Nov 03 00:06:02 2001 +0000

description:
Replace most uses of pmap_pde_p with pmap_pde_page, since that is what
we need later in the code.  This fixes a fatal kernel fault in
pmap_modified_emulation if a user application tries to access a kernel
address that is section-mapped.

Add a diagnostic that detects attempts to call pmap_kenter_pa with a
va that is section-mapped.

diffstat:

 sys/arch/arm/arm32/pmap.c |  34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diffs (119 lines):

diff -r 714db2309caf -r d60e6b5b1c89 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sat Nov 03 00:01:23 2001 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sat Nov 03 00:06:02 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.29 2001/11/01 15:49:16 rearnsha Exp $       */
+/*     $NetBSD: pmap.c,v 1.30 2001/11/03 00:06:02 rearnsha Exp $       */
 
 /*
  * Copyright (c) 2001 Richard Earnshaw
@@ -142,7 +142,7 @@
 #include <machine/param.h>
 #include <machine/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.29 2001/11/01 15:49:16 rearnsha Exp $");        
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.30 2001/11/03 00:06:02 rearnsha Exp $");        
 #ifdef PMAP_DEBUG
 #define        PDEBUG(_lev_,_stat_) \
        if (pmap_debug_level >= (_lev_)) \
@@ -2345,7 +2345,7 @@
        ptes = pmap_map_ptes(pmap);
        /* Get a page table pointer */
        while (sva < eva) {
-               if (pmap_pde_v(pmap_pde(pmap, sva)))
+               if (pmap_pde_page(pmap_pde(pmap, sva)))
                        break;
                sva = (sva & PD_MASK) + NBPD;
        }
@@ -2362,7 +2362,7 @@
        while (sva < eva) {
                /* Check if we can move to the next PDE (l1 chunk) */
                if (!(sva & PT_MASK))
-                       if (!pmap_pde_v(pmap_pde(pmap, sva))) {
+                       if (!pmap_pde_page(pmap_pde(pmap, sva))) {
                                sva += NBPD;
                                pte += arm_byte_to_page(NBPD);
                                continue;
@@ -2506,8 +2506,8 @@
                PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
                    pv->pv_va, pv->pv_flags));
 #ifdef DEBUG
-               if (!pmap_pde_v(pmap_pde(pmap, va)) || !pmap_pte_v(pte)
-                           || pmap_pte_pa(pte) != pa)
+               if (!pmap_pde_page(pmap_pde(pmap, pv->pv_pa)) ||
+                   !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
                        panic("pmap_remove_all: bad mapping");
 #endif /* DEBUG */
 
@@ -2593,7 +2593,7 @@
         * the following loop.
         */
        while (sva < eva) {
-               if (pmap_pde_v(pmap_pde(pmap, sva)))
+               if (pmap_pde_page(pmap_pde(pmap, sva)))
                        break;
                sva = (sva & PD_MASK) + NBPD;
        }
@@ -2603,7 +2603,7 @@
        while (sva < eva) {
                /* only check once in a while */
                if ((sva & PT_MASK) == 0) {
-                       if (!pmap_pde_v(pmap_pde(pmap, sva))) {
+                       if (!pmap_pde_page(pmap_pde(pmap, sva))) {
                                /* We can race ahead here, to the next pde. */
                                sva += NBPD;
                                pte += arm_byte_to_page(NBPD);
@@ -2898,8 +2898,13 @@
        pt_entry_t *pte;
        struct vm_page *pg;
  
-       if (!pmap_pde_v(pmap_pde(pmap, va))) {
-
+       if (!pmap_pde_page(pmap_pde(pmap, va))) {
+
+#ifdef DIAGNOSTIC
+               if (pmap_pde_v(pmap_pde(pmap, va)))
+                       panic("Trying to map kernel page into section mapping"
+                           " VA=%lx PA=%lx", va, pa);
+#endif
                /* 
                 * For the kernel pmaps it would be better to ensure
                 * that they are always present, and to grow the
@@ -2939,7 +2944,7 @@
                 * regions of memory.
                 */
 
-               KASSERT(pmap_pde_v(pmap_pde(pmap_kernel(), va)));
+               KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
                pte = vtopte(va);
                cpu_cache_purgeID_rng(va, PAGE_SIZE);
                *pte = 0;
@@ -3051,7 +3056,7 @@
            pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
 
        /* Do we have a valid pde ? If not we don't have a page table */
-       if (!pmap_pde_v(pmap_pde(pmap, va))) {
+       if (!pmap_pde_page(pmap_pde(pmap, va))) {
                PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
                    pmap_pde(pmap, va)));
                return(NULL); 
@@ -3146,8 +3151,9 @@
        /*
         * If there is no pte then there is no page table etc.
         * Is the pte valid ? If not then no paged is actually mapped here
+        * XXX Should we handle section mappings?
         */
-       if (!pmap_pde_v(pmap_pde(pmap, va)) || !pmap_pte_v(pte)){
+       if (!pmap_pde_page(pmap_pde(pmap, va)) || !pmap_pte_v(pte)){
            pmap_unmap_ptes(pmap);
            return (FALSE);
        }
@@ -3656,7 +3662,7 @@
 {
     struct vm_page *ptp;
 
-    if (pmap_pde_v(pmap_pde(pmap, va))) {
+    if (pmap_pde_page(pmap_pde(pmap, va))) {
 
        /* valid... check hint (saves us a PA->PG lookup) */
 #if 0



Home | Main Index | Thread Index | Old Index