Source-Changes-HG archive

[src/trunk]: src/sys/arch apply the change from arch/x86/x86/pmap.c rev. 1.26...



details:   https://anonhg.NetBSD.org/src/rev/0a5067c8d825
branches:  trunk
changeset: 829377:0a5067c8d825
user:      chs <chs%NetBSD.org@localhost>
date:      Sat Jan 27 23:07:36 2018 +0000

description:
apply the change from arch/x86/x86/pmap.c rev. 1.266 commitid vZRjvmxG7YTHLOfA:

In pmap_enter_ma(), only try to allocate pv entries if we might need
them, and even if that allocation fails, only fail the operation if we
later discover that we really do need them.  If we are replacing an
existing mapping, reuse the old mapping's pv entry where possible.
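
As a rough standalone illustration of that allocate-only-if-needed,
reuse-on-replace pattern, here is a plain C analogy.  The names here
(struct pv, enter_mapping(), remove_mapping()) are made up for the sketch;
the real changes are in the pmap diffs below, where pmap_remove_mapping()
and pmap_pv_remove() gain an opvp argument and pmap_pv_enter() accepts a pv
entry to reuse.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a pv entry: one record per mapping of a page. */
struct pv {
	unsigned long va;
	int page;
};

/*
 * Remove the record for an existing mapping.  If the caller passes opvp,
 * hand the record back for reuse instead of freeing it (this mirrors the
 * new opvp argument in the diffs below).
 */
static void
remove_mapping(struct pv **slot, struct pv **opvp)
{
	if (opvp != NULL)
		*opvp = *slot;
	else
		free(*slot);
	*slot = NULL;
}

/*
 * Enter a record for a new mapping.  Allocate only when the caller has
 * nothing to reuse, so replacing an existing mapping never needs a fresh
 * allocation and thus cannot fail for lack of memory.
 */
static int
enter_mapping(struct pv **slot, unsigned long va, int page, struct pv *reuse)
{
	struct pv *pv = reuse;

	if (pv == NULL) {
		pv = malloc(sizeof(*pv));
		if (pv == NULL)
			return ENOMEM;
	}
	pv->va = va;
	pv->page = page;
	*slot = pv;
	return 0;
}

int
main(void)
{
	struct pv *slot = NULL, *opv = NULL;

	enter_mapping(&slot, 0x1000UL, 1, NULL);  /* first mapping: allocates */
	remove_mapping(&slot, &opv);              /* replacement: keep old pv */
	enter_mapping(&slot, 0x1000UL, 2, opv);   /* reuse it: cannot fail */
	printf("va %#lx now maps page %d\n", slot->va, slot->page);
	free(slot);
	return 0;
}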

This implements the requirement that pmap_enter(PMAP_CANFAIL) must not fail
when replacing an existing mapping with the first mapping of a new page, a
requirement that is an unintended consequence of the changes from the
rmind-uvmplock branch in 2011.

The problem arises when pmap_enter(PMAP_CANFAIL) is used to replace an existing
pmap mapping with a mapping of a different page (e.g. to resolve a copy-on-write fault).
If that fails and leaves the old pmap entry in place, then UVM won't hold
the right locks when it eventually retries.  This entanglement of the UVM and
pmap locking was done in rmind-uvmplock in order to improve performance,
but it also means that the UVM state and pmap state need to be kept in sync
more than they did before.  It would be possible to handle this in the UVM code
instead of in the pmap code, but these pmap changes improve the handling of
low memory situations in general, and handling this in UVM would be clunky,
so this seemed like the better way to go.
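
For illustration, here is a standalone sketch of the retry hazard described
above, again as a plain C analogy with made-up names (replace_mapping(),
map_lock); it is not UVM or pmap code, only the shape of the
drop-the-lock-and-retry pattern that goes wrong when a failed replacement
leaves the old mapping behind.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static int current_page = 1;	/* the page currently "mapped" */

/*
 * Stand-in for pmap_enter(..., PMAP_CANFAIL) replacing an existing
 * mapping: simulate an allocation failure on the first attempt, which
 * leaves the old mapping (current_page == 1) untouched.
 */
static int
replace_mapping(int new_page)
{
	static int first_try = 1;

	if (first_try) {
		first_try = 0;
		return ENOMEM;
	}
	current_page = new_page;
	return 0;
}

int
main(void)
{
	int error;

	for (;;) {
		pthread_mutex_lock(&map_lock);
		error = replace_mapping(2);
		pthread_mutex_unlock(&map_lock);
		if (error == 0)
			break;
		/*
		 * The lock is dropped before retrying (as a real caller must
		 * do while it waits for memory), so the retry re-examines a
		 * mapping that may have changed in the meantime -- the kind
		 * of inconsistency the commit removes by guaranteeing that
		 * the replacement cannot fail in the first place.
		 */
	}
	printf("now mapping page %d\n", current_page);
	return 0;
}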

This somewhat indirectly fixes PR 52706 on the remaining platforms where
this problem existed.

diffstat:

 sys/arch/alpha/alpha/pmap.c        |  54 ++++++++++++++---------
 sys/arch/m68k/m68k/pmap_motorola.c |  46 ++++++++++++++------
 sys/arch/powerpc/oea/pmap.c        |  18 +++++--
 sys/arch/sparc64/sparc64/pmap.c    |  84 +++++++++++++++----------------------
 4 files changed, 112 insertions(+), 90 deletions(-)

diffs (truncated from 584 to 300 lines):

diff -r b8f0bad8d5d6 -r 0a5067c8d825 sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c       Sat Jan 27 22:25:23 2018 +0000
+++ b/sys/arch/alpha/alpha/pmap.c       Sat Jan 27 23:07:36 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.261 2016/12/23 07:15:27 cherry Exp $ */
+/* $NetBSD: pmap.c,v 1.262 2018/01/27 23:07:36 chs Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -140,7 +140,7 @@
 
 #include <sys/cdefs.h>                 /* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.261 2016/12/23 07:15:27 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.262 2018/01/27 23:07:36 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -439,7 +439,8 @@
  * Internal routines
  */
 static void    alpha_protection_init(void);
-static bool    pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, long);
+static bool    pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, long,
+                                   pv_entry_t *);
 static void    pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, long);
 
 /*
@@ -466,8 +467,9 @@
  * PV table management functions.
  */
 static int     pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *,
-                             bool);
-static void    pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool);
+                             bool, pv_entry_t);
+static void    pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool,
+                              pv_entry_t *);
 static void    *pmap_pv_page_alloc(struct pool *, int);
 static void    pmap_pv_page_free(struct pool *, void *);
 
@@ -1266,7 +1268,7 @@
                                            sva);
 #endif
                                needisync |= pmap_remove_mapping(pmap, sva,
-                                   l3pte, true, cpu_id);
+                                   l3pte, true, cpu_id, NULL);
                        }
                        sva += PAGE_SIZE;
                }
@@ -1343,7 +1345,7 @@
                                                    pmap_remove_mapping(
                                                        pmap, sva,
                                                        l3pte, true,
-                                                       cpu_id);
+                                                       cpu_id, NULL);
                                        }
 
                                        /*
@@ -1450,7 +1452,7 @@
                        panic("pmap_page_protect: bad mapping");
 #endif
                if (pmap_remove_mapping(pmap, pv->pv_va, pv->pv_pte,
-                   false, cpu_id) == true) {
+                   false, cpu_id, NULL)) {
                        if (pmap == pmap_kernel())
                                needkisync |= true;
                        else
@@ -1558,6 +1560,7 @@
 {
        struct vm_page *pg;                     /* if != NULL, managed page */
        pt_entry_t *pte, npte, opte;
+       pv_entry_t opv = NULL;
        paddr_t opa;
        bool tflush = true;
        bool hadasm = false;    /* XXX gcc -Wuninitialized */
@@ -1750,14 +1753,15 @@
                 */
                pmap_physpage_addref(pte);
        }
-       needisync |= pmap_remove_mapping(pmap, va, pte, true, cpu_id);
+       needisync |= pmap_remove_mapping(pmap, va, pte, true, cpu_id, &opv);
 
  validate_enterpv:
        /*
         * Enter the mapping into the pv_table if appropriate.
         */
        if (pg != NULL) {
-               error = pmap_pv_enter(pmap, pg, va, pte, true);
+               error = pmap_pv_enter(pmap, pg, va, pte, true, opv);
+               opv = NULL;
                if (error) {
                        pmap_l3pt_delref(pmap, va, pte, cpu_id);
                        if (flags & PMAP_CANFAIL)
@@ -1845,6 +1849,8 @@
 out:
        PMAP_UNLOCK(pmap);
        PMAP_MAP_TO_HEAD_UNLOCK();
+       if (opv)
+               pmap_pv_free(opv);
        
        return error;
 }
@@ -2422,7 +2428,7 @@
  */
 static bool
 pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte,
-    bool dolock, long cpu_id)
+    bool dolock, long cpu_id, pv_entry_t *opvp)
 {
        paddr_t pa;
        struct vm_page *pg;             /* if != NULL, page is managed */
@@ -2434,8 +2440,8 @@
 
 #ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
-               printf("pmap_remove_mapping(%p, %lx, %p, %d, %ld)\n",
-                      pmap, va, pte, dolock, cpu_id);
+               printf("pmap_remove_mapping(%p, %lx, %p, %d, %ld, %p)\n",
+                      pmap, va, pte, dolock, cpu_id, opvp);
 #endif
 
        /*
@@ -2511,7 +2517,8 @@
         */
        pg = PHYS_TO_VM_PAGE(pa);
        KASSERT(pg != NULL);
-       pmap_pv_remove(pmap, pg, va, dolock);
+       pmap_pv_remove(pmap, pg, va, dolock, opvp);
+       KASSERT(opvp == NULL || *opvp != NULL);
 
        return (needisync);
 }
@@ -2765,18 +2772,19 @@
  */
 static int
 pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
-    bool dolock)
+    bool dolock, pv_entry_t newpv)
 {
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-       pv_entry_t newpv;
        kmutex_t *lock;
 
        /*
         * Allocate and fill in the new pv_entry.
         */
-       newpv = pmap_pv_alloc();
-       if (newpv == NULL)
-               return ENOMEM;
+       if (newpv == NULL) {
+               newpv = pmap_pv_alloc();
+               if (newpv == NULL)
+                       return ENOMEM;
+       }
        newpv->pv_va = va;
        newpv->pv_pmap = pmap;
        newpv->pv_pte = pte;
@@ -2820,7 +2828,8 @@
  *     Remove a physical->virtual entry from the pv_table.
  */
 static void
-pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock)
+pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock,
+       pv_entry_t *opvp)
 {
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        pv_entry_t pv, *pvp;
@@ -2852,7 +2861,10 @@
                mutex_exit(lock);
        }
 
-       pmap_pv_free(pv);
+       if (opvp != NULL)
+               *opvp = pv;
+       else
+               pmap_pv_free(pv);
 }
 
 /*
diff -r b8f0bad8d5d6 -r 0a5067c8d825 sys/arch/m68k/m68k/pmap_motorola.c
--- a/sys/arch/m68k/m68k/pmap_motorola.c        Sat Jan 27 22:25:23 2018 +0000
+++ b/sys/arch/m68k/m68k/pmap_motorola.c        Sat Jan 27 23:07:36 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap_motorola.c,v 1.69 2016/12/23 07:15:27 cherry Exp $        */
+/*     $NetBSD: pmap_motorola.c,v 1.70 2018/01/27 23:07:36 chs Exp $        */
 
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -119,7 +119,7 @@
 #include "opt_m68k_arch.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.69 2016/12/23 07:15:27 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.70 2018/01/27 23:07:36 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -306,7 +306,8 @@
 /*
  * Internal routines
  */
-void   pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
+void   pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int,
+                           struct pv_entry **);
 bool   pmap_testbit(paddr_t, int);
 bool   pmap_changebit(paddr_t, int, int);
 int    pmap_enter_ptpage(pmap_t, vaddr_t, bool);
@@ -843,7 +844,7 @@
                                }
                                firstpage = false;
 #endif
-                               pmap_remove_mapping(pmap, sva, pte, flags);
+                               pmap_remove_mapping(pmap, sva, pte, flags, NULL);
                        }
                        pte++;
                        sva += PAGE_SIZE;
@@ -929,7 +930,7 @@
                        panic("pmap_page_protect: bad mapping");
 #endif
                pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
-                   pte, PRM_TFLUSH|PRM_CFLUSH);
+                   pte, PRM_TFLUSH|PRM_CFLUSH, NULL);
        }
        splx(s);
 }
@@ -1048,6 +1049,7 @@
 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
        pt_entry_t *pte;
+       struct pv_entry *opv = NULL;
        int npte;
        paddr_t opa;
        bool cacheable = true;
@@ -1130,7 +1132,7 @@
                PMAP_DPRINTF(PDB_ENTER,
                    ("enter: removing old mapping %lx\n", va));
                pmap_remove_mapping(pmap, va, pte,
-                   PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
+                   PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
        }
 
        /*
@@ -1179,7 +1181,12 @@
                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                                        panic("pmap_enter: already in pv_tab");
 #endif
-                       npv = pmap_alloc_pv();
+                       if (opv != NULL) {
+                               npv = opv;
+                               opv = NULL;
+                       } else {
+                               npv = pmap_alloc_pv();
+                       }
                        KASSERT(npv != NULL);
                        npv->pv_va = va;
                        npv->pv_pmap = pmap;
@@ -1346,6 +1353,9 @@
                pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
 #endif
 
+       if (opv != NULL)
+               pmap_free_pv(opv);
+
        return 0;
 }
 
@@ -1659,7 +1669,7 @@
 
                (void) pmap_extract(pmap, pv->pv_va, &kpa);
                pmap_remove_mapping(pmap, pv->pv_va, NULL,
-                   PRM_TFLUSH|PRM_CFLUSH);
+                   PRM_TFLUSH|PRM_CFLUSH, NULL);
 
                /*
                 * Use the physical address to locate the original
@@ -1970,11 +1980,12 @@
  */
 /* static */
 void
-pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
+pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
+    struct pv_entry **opvp)
 {
        paddr_t pa;
        struct pv_header *pvh;
-       struct pv_entry *pv, *npv;
+       struct pv_entry *pv, *npv, *opv = NULL;
        struct pmap *ptpmap;
        st_entry_t *ste;
        int s, bits;
@@ -1983,8 +1994,8 @@
 #endif
 
        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
-           ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
-           pmap, va, pte, flags));
+           ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
+           pmap, va, pte, flags, opvp));
 
        /*


