Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/x86/x86 Fix a subtle but important bug in pmap_grow...



details:   https://anonhg.NetBSD.org/src/rev/d012c4e4feee
branches:  trunk
changeset: 824705:d012c4e4feee
user:      maxv <maxv%NetBSD.org@localhost>
date:      Thu Jun 15 13:42:55 2017 +0000

description:
Fix a subtle but important bug in pmap_growkernel. When adding new toplevel
slots to pmap_kernel, we are implicitly using the recursive slot; but this
slot is in the active pmap, which may not be pmap_kernel. Therefore, adding
L4 slots is fine in itself, but when adding L3 slots the kernel faults
since the L4 slots that were just added are not active on the cpu.

So far this has never been triggered, because the current va limit makes it
impossible to add a new L4 slot, and i386 only has one level so the kernel
cannot fault in a lower level.

Now the tree is grown in the current pmap (cpm), copied into pmap_kernel,
and propagated in the other pmaps as expected.

Note that we're using CPUF_PRESENT, because this function may be called
early, before cpu0 is attached. It does add to the current mess in the
cpu attach code, so it will probably have to be revisited later.

diffstat:

 sys/arch/x86/x86/pmap.c |  35 ++++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)

diffs (107 lines):

diff -r 3296cc2d33b8 -r d012c4e4feee sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Thu Jun 15 12:45:10 2017 +0000
+++ b/sys/arch/x86/x86/pmap.c   Thu Jun 15 13:42:55 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.249 2017/06/15 09:31:48 maxv Exp $  */
+/*     $NetBSD: pmap.c,v 1.250 2017/06/15 13:42:55 maxv Exp $  */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.249 2017/06/15 09:31:48 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.250 2017/06/15 13:42:55 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -562,7 +562,7 @@
     vaddr_t, struct pv_entry **);
 
 static paddr_t pmap_get_physpage(void);
-static void pmap_alloc_level(vaddr_t, long *);
+static void pmap_alloc_level(struct pmap *, vaddr_t, long *);
 
 static bool pmap_reactivate(struct pmap *);
 
@@ -4325,7 +4325,7 @@
  * Used only by pmap_growkernel.
  */
 static void
-pmap_alloc_level(vaddr_t kva, long *needed_ptps)
+pmap_alloc_level(struct pmap *cpm, vaddr_t kva, long *needed_ptps)
 {
        unsigned long i;
        paddr_t pa;
@@ -4338,7 +4338,7 @@
 
        for (level = PTP_LEVELS; level > 1; level--) {
                if (level == PTP_LEVELS)
-                       pdep = pmap_kernel()->pm_pdir;
+                       pdep = cpm->pm_pdir;
                else
                        pdep = normal_pdes[level - 2];
                index = pl_i_roundup(kva, level);
@@ -4398,10 +4398,11 @@
 pmap_growkernel(vaddr_t maxkvaddr)
 {
        struct pmap *kpm = pmap_kernel();
+       struct pmap *cpm;
 #if !defined(XEN) || !defined(__x86_64__)
        struct pmap *pm;
+#endif
        long old;
-#endif
        int s, i;
        long needed_kptp[PTP_LEVELS], target_nptp;
        bool invalidate = false;
@@ -4416,9 +4417,7 @@
        }
 
        maxkvaddr = x86_round_pdr(maxkvaddr);
-#if !defined(XEN) || !defined(__x86_64__)
        old = nkptp[PTP_LEVELS - 1];
-#endif
 
        /* Initialize needed_kptp. */
        for (i = PTP_LEVELS - 1; i >= 1; i--) {
@@ -4431,12 +4430,28 @@
                needed_kptp[i] = target_nptp - nkptp[i];
        }
 
-       pmap_alloc_level(pmap_maxkvaddr, needed_kptp);
+       /* Get the current pmap */
+       if (__predict_true(cpu_info_primary.ci_flags & CPUF_PRESENT)) {
+               cpm = curcpu()->ci_pmap;
+       } else {
+               cpm = kpm;
+       }
+
+       pmap_alloc_level(cpm, pmap_maxkvaddr, needed_kptp);
 
        /*
         * If the number of top level entries changed, update all pmaps.
         */
        if (needed_kptp[PTP_LEVELS - 1] != 0) {
+               size_t newpdes;
+               newpdes = nkptp[PTP_LEVELS - 1] - old;
+
+               if (cpm != kpm) {
+                       memcpy(&kpm->pm_pdir[PDIR_SLOT_KERN + old],
+                           &cpm->pm_pdir[PDIR_SLOT_KERN + old],
+                           newpdes * sizeof(pd_entry_t));
+               }
+
 #ifdef XEN
 #ifdef __x86_64__
                /* nothing, kernel entries are never entered in user pmap */
@@ -4455,8 +4470,6 @@
                mutex_exit(&pmaps_lock);
 #endif /* __x86_64__ */
 #else /* XEN */
-               unsigned newpdes;
-               newpdes = nkptp[PTP_LEVELS - 1] - old;
                mutex_enter(&pmaps_lock);
                LIST_FOREACH(pm, &pmaps, pm_list) {
                        memcpy(&pm->pm_pdir[PDIR_SLOT_KERN + old],



Home | Main Index | Thread Index | Old Index