Source-Changes-HG archive


[src/trunk]: src/sys/uvm When using uvm_km_pgremove_intrsafe() make sure mapp...



details:   https://anonhg.NetBSD.org/src/rev/60990cd36851
branches:  trunk
changeset: 777490:60990cd36851
user:      bouyer <bouyer%NetBSD.org@localhost>
date:      Mon Feb 20 19:14:23 2012 +0000

description:
When using uvm_km_pgremove_intrsafe(), make sure mappings are removed
before returning the pages to the free pool. Otherwise, under Xen,
a page which still has a writable mapping could be allocated for
a PDP by another CPU and the hypervisor would refuse it (this is
PR port-xen/45975).
For this, move the pmap_kremove() calls inside uvm_km_pgremove_intrsafe(),
and do pmap_kremove()/uvm_pagefree() in batches of (at most) 16 entries
(as suggested by Chuck Silvers on tech-kern@, see also
http://mail-index.netbsd.org/tech-kern/2012/02/17/msg012727.html and
followups).
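
For readers who want to see the batching pattern in isolation, the following
is a minimal userland sketch of the remove-then-free loop, not the kernel code
itself: extract(), unmap_range() and free_page() are hypothetical stand-ins
for pmap_extract(), pmap_kremove() and uvm_pagefree(), the "page table" is a
plain array, and for simplicity the sketch unmaps the whole scanned range
rather than exactly npgrm pages.

/*
 * Simplified, standalone illustration of the batched unmap-then-free
 * pattern used by uvm_km_pgremove_intrsafe().  extract(), unmap_range()
 * and free_page() are stand-ins for pmap_extract(), pmap_kremove() and
 * uvm_pagefree(); the "page table" is an array of physical addresses
 * indexed by virtual page number, 0 meaning "not mapped".
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define NPAGES		64
#define BATCH		16		/* mirrors __PGRM_BATCH */

typedef uintptr_t vaddr_t;
typedef uintptr_t paddr_t;

static paddr_t pagetable[NPAGES];	/* 0 == not mapped */

/* Stand-in for pmap_extract(): look up the mapping for va, if any. */
static bool
extract(vaddr_t va, paddr_t *pap)
{
	size_t idx = va / PAGE_SIZE;

	assert(idx < NPAGES);
	if (pagetable[idx] == 0)
		return false;
	*pap = pagetable[idx];
	return true;
}

/* Stand-in for pmap_kremove(): drop all mappings in [va, va + len). */
static void
unmap_range(vaddr_t va, size_t len)
{
	for (size_t off = 0; off < len; off += PAGE_SIZE)
		pagetable[(va + off) / PAGE_SIZE] = 0;
}

/* Stand-in for uvm_pagefree(): here we just report the page. */
static void
free_page(paddr_t pa)
{
	printf("freeing page at pa 0x%lx\n", (unsigned long)pa);
}

/*
 * Gather up to BATCH mapped pages, remove their mappings first, and
 * only then free them, so no page ever reaches the free pool while a
 * writable mapping to it still exists.
 */
static void
pgremove_batched(vaddr_t start, vaddr_t end)
{
	paddr_t pa[BATCH];
	vaddr_t va, batch_vastart;
	int i, npgrm;

	for (va = start; va < end;) {
		batch_vastart = va;
		for (i = 0; i < BATCH && va < end; va += PAGE_SIZE) {
			if (!extract(va, &pa[i]))
				continue;	/* hole: skip, keep i */
			i++;
		}
		npgrm = i;
		/* remove the mappings before freeing anything */
		unmap_range(batch_vastart, va - batch_vastart);
		for (i = 0; i < npgrm; i++)
			free_page(pa[i]);
	}
}

int
main(void)
{
	/* map every second page with a fake physical address */
	for (size_t n = 0; n < NPAGES; n += 2)
		pagetable[n] = 0x100000 + n * PAGE_SIZE;

	pgremove_batched(0, NPAGES * PAGE_SIZE);
	return 0;
}

The point of the commit is visible in pgremove_batched(): nothing is handed
to free_page() until unmap_range() has dropped every mapping in the batch, so
a freed page can never still carry a writable mapping when another CPU picks
it up for reuse.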

diffstat:

 sys/uvm/uvm_km.c      |  43 +++++++++++++++++++++++++++++--------------
 sys/uvm/uvm_kmguard.c |   5 ++---
 sys/uvm/uvm_map.c     |   5 ++---
 3 files changed, 33 insertions(+), 20 deletions(-)

diffs (147 lines):

diff -r 292652b85654 -r 60990cd36851 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c  Mon Feb 20 18:27:30 2012 +0000
+++ b/sys/uvm/uvm_km.c  Mon Feb 20 19:14:23 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_km.c,v 1.121 2012/02/19 00:05:56 rmind Exp $       */
+/*     $NetBSD: uvm_km.c,v 1.122 2012/02/20 19:14:23 bouyer Exp $      */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -120,7 +120,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.121 2012/02/19 00:05:56 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.122 2012/02/20 19:14:23 bouyer Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -459,8 +459,12 @@
 void
 uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
 {
+#define __PGRM_BATCH 16
        struct vm_page *pg;
-       paddr_t pa;
+       paddr_t pa[__PGRM_BATCH];
+       int npgrm, i;
+       vaddr_t va, batch_vastart;
+
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
        KASSERT(VM_MAP_IS_KERNEL(map));
@@ -468,16 +472,30 @@
        KASSERT(start < end);
        KASSERT(end <= vm_map_max(map));
 
-       for (; start < end; start += PAGE_SIZE) {
-               if (!pmap_extract(pmap_kernel(), start, &pa)) {
-                       continue;
+       for (va = start; va < end;) {
+               batch_vastart = va;
+               /* create a batch of at most __PGRM_BATCH pages to free */
+               for (i = 0;
+                    i < __PGRM_BATCH && va < end;
+                    va += PAGE_SIZE) {
+                       if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
+                               continue;
+                       }
+                       i++;
                }
-               pg = PHYS_TO_VM_PAGE(pa);
-               KASSERT(pg);
-               KASSERT(pg->uobject == NULL && pg->uanon == NULL);
-               KASSERT((pg->flags & PG_BUSY) == 0);
-               uvm_pagefree(pg);
+               npgrm = i;
+               /* now remove the mappings */
+               pmap_kremove(batch_vastart, PAGE_SIZE * npgrm);
+               /* and free the pages */
+               for (i = 0; i < npgrm; i++) {
+                       pg = PHYS_TO_VM_PAGE(pa[i]);
+                       KASSERT(pg);
+                       KASSERT(pg->uobject == NULL && pg->uanon == NULL);
+                       KASSERT((pg->flags & PG_BUSY) == 0);
+                       uvm_pagefree(pg);
+               }
        }
+#undef __PGRM_BATCH
 }
 
 #if defined(DEBUG)
@@ -668,7 +686,6 @@
                 * remove it after.  See comment below about KVA visibility.
                 */
                uvm_km_pgremove_intrsafe(map, addr, addr + size);
-               pmap_kremove(addr, size);
        }
 
        /*
@@ -745,7 +762,6 @@
                        } else {
                                uvm_km_pgremove_intrsafe(kernel_map, va,
                                    va + size);
-                               pmap_kremove(va, size);
                                vmem_free(kmem_va_arena, va, size);
                                return ENOMEM;
                        }
@@ -781,7 +797,6 @@
        }
 #endif /* PMAP_UNMAP_POOLPAGE */
        uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
-       pmap_kremove(addr, size);
        pmap_update(pmap_kernel());
 
        vmem_free(vm, addr, size);
diff -r 292652b85654 -r 60990cd36851 sys/uvm/uvm_kmguard.c
--- a/sys/uvm/uvm_kmguard.c     Mon Feb 20 18:27:30 2012 +0000
+++ b/sys/uvm/uvm_kmguard.c     Mon Feb 20 19:14:23 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_kmguard.c,v 1.9 2012/02/05 11:08:06 rmind Exp $    */
+/*     $NetBSD: uvm_kmguard.c,v 1.10 2012/02/20 19:14:23 bouyer Exp $  */
 
 /*-
  * Copyright (c) 2009 The NetBSD Foundation, Inc.
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_kmguard.c,v 1.9 2012/02/05 11:08:06 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_kmguard.c,v 1.10 2012/02/20 19:14:23 bouyer Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -180,7 +180,6 @@
         */
 
        uvm_km_pgremove_intrsafe(kernel_map, va, va + PAGE_SIZE * 2);
-       pmap_kremove(va, PAGE_SIZE * 2);
        pmap_update(pmap_kernel());
 
        /*
diff -r 292652b85654 -r 60990cd36851 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Mon Feb 20 18:27:30 2012 +0000
+++ b/sys/uvm/uvm_map.c Mon Feb 20 19:14:23 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.314 2012/02/19 00:05:56 rmind Exp $      */
+/*     $NetBSD: uvm_map.c,v 1.315 2012/02/20 19:14:23 bouyer Exp $     */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.314 2012/02/19 00:05:56 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.315 2012/02/20 19:14:23 bouyer Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -2221,7 +2221,6 @@
                        if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
                                uvm_km_pgremove_intrsafe(map, entry->start,
                                    entry->end);
-                               pmap_kremove(entry->start, len);
                        }
                } else if (UVM_ET_ISOBJ(entry) &&
                           UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {


