Source-Changes-HG archive


[src/trunk]: src/sys/arch Two changes:



details:   https://anonhg.NetBSD.org/src/rev/b5cba66f6b87
branches:  trunk
changeset: 449554:b5cba66f6b87
user:      maxv <maxv%NetBSD.org@localhost>
date:      Sun Mar 10 16:30:01 2019 +0000

description:
Two changes:

 * Allow large pages to be passed in pmap_pdes_valid; this happens under
   DDB when it reads RIP (.text), called via pmap_extract.

 * Invert a branch in pmap_extract, so that 'l_cpu' is not touched if we're
   dealing with the kernel pmap.

This fixes 'boot -d'.
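
As context for the diff below, here is a minimal standalone model of the
reworked walk. It is not the kernel code: PTE_P, PTE_PS, the flat pdes[]
array and the pdes_valid() name are simplified stand-ins chosen for
illustration; the real implementation is in the sys/arch/x86/x86/pmap.c
hunk further down.

#include <stdbool.h>
#include <stdio.h>

#define PTE_P		0x1	/* present */
#define PTE_PS		0x80	/* large page: mapping ends at this level */
#define PTP_LEVELS	4	/* 4-level paging, as on amd64 */

/*
 * Walk the directory levels from the top down.  Return true if every
 * level needed for the mapping is present, and report in *lastlvl the
 * level where the walk stopped: 1 for a normal PTE-backed page, 2 for
 * a large page whose PDE has PTE_PS set.
 */
static bool
pdes_valid(const unsigned long *pdes /* one entry per level, [2..PTP_LEVELS] */,
    unsigned long *lastpde, int *lastlvl)
{
	unsigned long pde = 0;
	int i;

	for (i = PTP_LEVELS; i > 1; i--) {
		pde = pdes[i - 2];
		if ((pde & PTE_P) == 0) {
			/* Walk failed: report the level that was missing. */
			*lastlvl = i;
			return false;
		}
		if (pde & PTE_PS)
			break;	/* large page: nothing below this level */
	}
	if (lastpde != NULL)
		*lastpde = pde;
	*lastlvl = i;
	return true;
}

int
main(void)
{
	/* Indices 0..2 stand for levels 2..4; all present, level 2 is large. */
	unsigned long pdes[PTP_LEVELS - 1] = { PTE_P | PTE_PS, PTE_P, PTE_P };
	unsigned long pde;
	int lvl;

	if (pdes_valid(pdes, &pde, &lvl))
		printf("valid, stopped at level %d\n", lvl);	/* prints 2 */
	return 0;
}

The design point of the new interface is that a large page no longer makes
the walk report failure: the level at which the walk stopped is enough for
the caller to distinguish a large mapping (lvl == 2) from a normal
PTE-backed one (lvl == 1).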

diffstat:

 sys/arch/x86/include/pmap.h |  11 ++-----
 sys/arch/x86/x86/pmap.c     |  64 ++++++++++++++++++++++++++++----------------
 sys/arch/xen/x86/xen_pmap.c |   8 +++--
 3 files changed, 48 insertions(+), 35 deletions(-)

diffs (265 lines):

diff -r 59ad31c4b947 -r b5cba66f6b87 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h       Sun Mar 10 15:45:26 2019 +0000
+++ b/sys/arch/x86/include/pmap.h       Sun Mar 10 16:30:01 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.99 2019/03/09 08:42:26 maxv Exp $   */
+/*     $NetBSD: pmap.h,v 1.100 2019/03/10 16:30:01 maxv Exp $  */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -372,7 +372,8 @@
                    pd_entry_t * const **);
 void           pmap_unmap_ptes(struct pmap *, struct pmap *);
 
-int            pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);
+bool           pmap_pdes_valid(vaddr_t, pd_entry_t * const *, pd_entry_t *,
+                   int *lastlvl);
 
 u_int          x86_mmap_flags(paddr_t);
 
@@ -424,12 +425,6 @@
  * inline functions
  */
 
-__inline static bool __unused
-pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
-{
-       return pmap_pdes_invalid(va, pdes, lastpde) == 0;
-}
-
 /*
  * pmap_update_pg: flush one page from the TLB (or flush the whole thing
  *     if hardware doesn't support one-page flushing)
diff -r 59ad31c4b947 -r b5cba66f6b87 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Sun Mar 10 15:45:26 2019 +0000
+++ b/sys/arch/x86/x86/pmap.c   Sun Mar 10 16:30:01 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.329 2019/03/09 08:42:26 maxv Exp $  */
+/*     $NetBSD: pmap.c,v 1.330 2019/03/10 16:30:01 maxv Exp $  */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.329 2019/03/09 08:42:26 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.330 2019/03/10 16:30:01 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -3031,22 +3031,28 @@
  * some misc. functions
  */
 
-int
-pmap_pdes_invalid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
+bool
+pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde,
+    int *lastlvl)
 {
-       int i;
        unsigned long index;
        pd_entry_t pde;
+       int i;
 
        for (i = PTP_LEVELS; i > 1; i--) {
                index = pl_i(va, i);
                pde = pdes[i - 2][index];
-               if ((pde & PTE_P) == 0)
-                       return i;
+               if ((pde & PTE_P) == 0) {
+                       *lastlvl = i;
+                       return false;
+               }
+               if (pde & PTE_PS)
+                       break;
        }
        if (lastpde != NULL)
                *lastpde = pde;
-       return 0;
+       *lastlvl = i;
+       return true;
 }
 
 /*
@@ -3063,6 +3069,7 @@
        paddr_t pa;
        lwp_t *l;
        bool hard, rv;
+       int lvl;
 
        if (__predict_false(pmap->pm_extract != NULL)) {
                return (*pmap->pm_extract)(pmap, va, pap);
@@ -3083,8 +3090,8 @@
 
        kpreempt_disable();
        ci = l->l_cpu;
-       if (__predict_true(!ci->ci_want_pmapload && ci->ci_pmap == pmap) ||
-           pmap == pmap_kernel()) {
+       if (pmap == pmap_kernel() ||
+           __predict_true(!ci->ci_want_pmapload && ci->ci_pmap == pmap)) {
                /*
                 * no need to lock, because it's pmap_kernel() or our
                 * own pmap and is active.  if a user pmap, the caller
@@ -3101,14 +3108,17 @@
                hard = true;
                pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
        }
-       if (pmap_pdes_valid(va, pdes, &pde)) {
-               pte = ptes[pl1_i(va)];
-               if (pde & PTE_PS) {
+       if (pmap_pdes_valid(va, pdes, &pde, &lvl)) {
+               if (lvl == 2) {
                        pa = (pde & PTE_LGFRAME) | (va & (NBPD_L2 - 1));
                        rv = true;
-               } else if (__predict_true((pte & PTE_P) != 0)) {
-                       pa = pmap_pte2pa(pte) | (va & (NBPD_L1 - 1));
-                       rv = true;
+               } else {
+                       KASSERT(lvl == 1);
+                       pte = ptes[pl1_i(va)];
+                       if (__predict_true((pte & PTE_P) != 0)) {
+                               pa = pmap_pte2pa(pte) | (va & (NBPD_L1 - 1));
+                               rv = true;
+                       }
                }
        }
        if (__predict_false(hard)) {
@@ -3531,6 +3541,7 @@
        vaddr_t blkendva, va = sva;
        struct vm_page *ptp;
        struct pmap *pmap2;
+       int lvl;
 
        if (__predict_false(pmap->pm_remove != NULL)) {
                (*pmap->pm_remove)(pmap, sva, eva);
@@ -3545,7 +3556,8 @@
         */
 
        if (va + PAGE_SIZE == eva) {
-               if (pmap_pdes_valid(va, pdes, &pde)) {
+               if (pmap_pdes_valid(va, pdes, &pde, &lvl)) {
+                       KASSERT(lvl == 1);
 
                        /* PA of the PTP */
                        ptppa = pmap_pte2pa(pde);
@@ -3572,19 +3584,17 @@
                                pmap_free_ptp(pmap, ptp, va, ptes, pdes);
                }
        } else for (/* null */ ; va < eva ; va = blkendva) {
-               int lvl;
-
                /* determine range of block */
                blkendva = x86_round_pdr(va+1);
                if (blkendva > eva)
                        blkendva = eva;
 
-               lvl = pmap_pdes_invalid(va, pdes, &pde);
-               if (lvl != 0) {
+               if (!pmap_pdes_valid(va, pdes, &pde, &lvl)) {
                        /* Skip a range corresponding to an invalid pde. */
                        blkendva = (va & ptp_frames[lvl - 1]) + nbpd[lvl - 1];
                        continue;
                }
+               KASSERT(lvl == 1);
 
                /* PA of the PTP */
                ptppa = pmap_pte2pa(pde);
@@ -4003,6 +4013,7 @@
        pt_entry_t * const *pdes;
        struct pmap *pmap2;
        vaddr_t blockend, va;
+       int lvl;
 
        KASSERT(curlwp->l_md.md_gc_pmap != pmap);
 
@@ -4034,10 +4045,11 @@
                        blockend = eva;
 
                /* Is it a valid block? */
-               if (!pmap_pdes_valid(va, pdes, NULL)) {
+               if (!pmap_pdes_valid(va, pdes, NULL, &lvl)) {
                        continue;
                }
                KASSERT(va < VM_MAXUSER_ADDRESS || va >= VM_MAX_ADDRESS);
+               KASSERT(lvl == 1);
 
                spte = &ptes[pl1_i(va)];
                epte = &ptes[pl1_i(blockend)];
@@ -4078,6 +4090,7 @@
        pt_entry_t *ptes, *ptep, opte;
        pd_entry_t * const *pdes;
        struct pmap *pmap2;
+       int lvl;
 
        if (__predict_false(pmap->pm_unwire != NULL)) {
                (*pmap->pm_unwire)(pmap, va);
@@ -4088,9 +4101,10 @@
        kpreempt_disable();
        pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
 
-       if (!pmap_pdes_valid(va, pdes, NULL)) {
+       if (!pmap_pdes_valid(va, pdes, NULL, &lvl)) {
                panic("%s: invalid PDE va=%#" PRIxVADDR, __func__, va);
        }
+       KASSERT(lvl == 1);
 
        ptep = &ptes[pl1_i(va)];
        opte = *ptep;
@@ -4623,6 +4637,7 @@
        pd_entry_t * const *pdes;
        struct pmap *pmap2;
        vaddr_t blkendva;
+       int lvl;
 
        /*
         * if end is out of range truncate.
@@ -4651,8 +4666,9 @@
                        blkendva = eva;
 
                /* valid block? */
-               if (!pmap_pdes_valid(sva, pdes, NULL))
+               if (!pmap_pdes_valid(sva, pdes, NULL, &lvl))
                        continue;
+               KASSERT(lvl == 1);
 
                pte = &ptes[pl1_i(sva)];
                for (/* null */; sva < blkendva ; sva += PAGE_SIZE, pte++) {
diff -r 59ad31c4b947 -r b5cba66f6b87 sys/arch/xen/x86/xen_pmap.c
--- a/sys/arch/xen/x86/xen_pmap.c       Sun Mar 10 15:45:26 2019 +0000
+++ b/sys/arch/xen/x86/xen_pmap.c       Sun Mar 10 16:30:01 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: xen_pmap.c,v 1.30 2019/03/09 08:42:25 maxv Exp $       */
+/*     $NetBSD: xen_pmap.c,v 1.31 2019/03/10 16:30:01 maxv Exp $       */
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -101,7 +101,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.30 2019/03/09 08:42:25 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.31 2019/03/10 16:30:01 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -212,15 +212,17 @@
        pd_entry_t pde;
        pd_entry_t * const *pdes;
        struct pmap *pmap2;
+       int lvl;
 
        kpreempt_disable();
        pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
-       if (!pmap_pdes_valid(va, pdes, &pde)) {
+       if (!pmap_pdes_valid(va, pdes, &pde, &lvl)) {
                pmap_unmap_ptes(pmap, pmap2);
                kpreempt_enable();
                return false;
        }
 
+       KASSERT(lvl == 1);
        pte = ptes[pl1_i(va)];
        pmap_unmap_ptes(pmap, pmap2);
        kpreempt_enable();



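For completeness, a hedged sketch of the caller-side pattern the new
interface enables, mirroring the pmap_extract hunk above: the large-page
case is keyed off the reported level instead of re-testing PTE_PS, and the
level-1 PTE array is only consulted when lvl == 1. All names and constants
below are hypothetical stand-ins, not the kernel definitions.

#include <assert.h>
#include <stdio.h>

#define NBPD_L1 4096UL			/* bytes mapped by a level-1 entry (4KB) */
#define NBPD_L2 (512 * 4096UL)		/* bytes mapped by a level-2 entry (2MB) */

/*
 * Assume the walk already ran and reported the level it stopped at.
 * pde_frame is the frame held by the large-page PDE, pte_frame the frame
 * held by the level-1 PTE; only one of them is meaningful per call.
 */
static unsigned long
extract_pa(unsigned long va, unsigned long pde_frame, unsigned long pte_frame,
    int lvl)
{
	if (lvl == 2) {
		/* Large page: the PDE itself holds the frame. */
		return pde_frame | (va & (NBPD_L2 - 1));
	}
	assert(lvl == 1);
	/* Normal page: the level-1 PTE holds the frame. */
	return pte_frame | (va & (NBPD_L1 - 1));
}

int
main(void)
{
	printf("%#lx\n", extract_pa(0x200123, 0x40000000UL, 0, 2));
	printf("%#lx\n", extract_pa(0x200123, 0, 0x7f000UL, 1));
	return 0;
}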