tech-kern archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

Re: UVM patch for PR port-xen/45975



On Sat, Feb 18, 2012 at 10:12:36AM -0800, Chuck Silvers wrote:
> On Sat, Feb 18, 2012 at 07:01:24PM +0100, Manuel Bouyer wrote:
> > >  - the calls to pmap_update() should be moved along with the calls to
> > >    pmap_kremove().
> > 
> > But not all pmap_kremove() are followed by a pmap_update() in the initial 
> > code.
> > Is it a bug ?
> 
> yes.  we don't currently defer TLB invalidations from pmap_kremove() on x86
> (or probably any platform), but if we start doing that then the pmap_update()s
> are needed.

Note that some places are doing the pmap_update(), but not immediately
after uvm_km_pgremove_intrsafe(). By adding a pmap_update() in
uvm_km_pgremove_intrsafe() we may be doing it twice.

I've added pmap_update() to uvm_km_pgremove_intrsafe() and removed
pmap_update() calls that were immediately following a uvm_km_pgremove_intrsafe().

I also implemented the idea of removing the mappings and freeing pages
in batches of 16 entries. Updated patch attached, I'm re-running the
build.sh -jx release tests. This should also fix the problem in the native
case as there's no #ifdef any more :)

-- 
Manuel Bouyer <bouyer%antioche.eu.org@localhost>
     NetBSD: 26 ans d'experience feront toujours la difference
--
Index: uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.120
diff -u -p -u -r1.120 uvm_km.c
--- uvm_km.c    10 Feb 2012 17:35:47 -0000      1.120
+++ uvm_km.c    18 Feb 2012 18:57:39 -0000
@@ -459,8 +459,12 @@ uvm_km_pgremove(vaddr_t startva, vaddr_t
 void
 uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
 {
+#define __PGRM_BATCH 16
        struct vm_page *pg;
-       paddr_t pa;
+       paddr_t pa[__PGRM_BATCH];
+       int npgrm, i;
+       vaddr_t va, batch_vastart;
+
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
        KASSERT(VM_MAP_IS_KERNEL(map));
@@ -468,16 +472,31 @@ uvm_km_pgremove_intrsafe(struct vm_map *
        KASSERT(start < end);
        KASSERT(end <= vm_map_max(map));
 
-       for (; start < end; start += PAGE_SIZE) {
-               if (!pmap_extract(pmap_kernel(), start, &pa)) {
-                       continue;
+       for (va = start; va < end;) {
+               batch_vastart = va;
+               /* create a batch of at most __PGRM_BATCH pages to free */
+               for (i = 0;
+                    i < __PGRM_BATCH && va < end;
+                    va += PAGE_SIZE) {
+                       if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
+                               continue;
+                       }
+                       i++;
+               }
+               npgrm = i;
+               /* now remove the mappings */
+               pmap_kremove(batch_vastart, PAGE_SIZE * npgrm);
+               /* and free the pages */
+               for (i = 0; i < npgrm; i++) {
+                       pg = PHYS_TO_VM_PAGE(pa[i]);
+                       KASSERT(pg);
+                       KASSERT(pg->uobject == NULL && pg->uanon == NULL);
+                       KASSERT((pg->flags & PG_BUSY) == 0);
+                       uvm_pagefree(pg);
                }
-               pg = PHYS_TO_VM_PAGE(pa);
-               KASSERT(pg);
-               KASSERT(pg->uobject == NULL && pg->uanon == NULL);
-               KASSERT((pg->flags & PG_BUSY) == 0);
-               uvm_pagefree(pg);
        }
+       pmap_update(pmap_kernel());
+#undef __PGRM_BATCH
 }
 
 #if defined(DEBUG)
@@ -670,7 +689,6 @@ uvm_km_free(struct vm_map *map, vaddr_t 
                 * remove it after.  See comment below about KVA visibility.
                 */
                uvm_km_pgremove_intrsafe(map, addr, addr + size);
-               pmap_kremove(addr, size);
        }
 
        /*
@@ -747,7 +765,6 @@ again:
                        } else {
                                uvm_km_pgremove_intrsafe(kernel_map, va,
                                    va + size);
-                               pmap_kremove(va, size);
                                vmem_free(kmem_va_arena, va, size);
                                return ENOMEM;
                        }
@@ -783,8 +800,6 @@ uvm_km_kmem_free(vmem_t *vm, vmem_addr_t
        }
 #endif /* PMAP_UNMAP_POOLPAGE */
        uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
-       pmap_kremove(addr, size);
-       pmap_update(pmap_kernel());
 
        vmem_free(vm, addr, size);
 }
Index: uvm_kmguard.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_kmguard.c,v
retrieving revision 1.9
diff -u -p -u -r1.9 uvm_kmguard.c
--- uvm_kmguard.c       5 Feb 2012 11:08:06 -0000       1.9
+++ uvm_kmguard.c       18 Feb 2012 18:57:39 -0000
@@ -180,8 +180,6 @@ uvm_kmguard_free(struct uvm_kmguard *kg,
         */
 
        uvm_km_pgremove_intrsafe(kernel_map, va, va + PAGE_SIZE * 2);
-       pmap_kremove(va, PAGE_SIZE * 2);
-       pmap_update(pmap_kernel());
 
        /*
         * put the VA allocation into the list and swap an old one
Index: uvm_map.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
retrieving revision 1.313
diff -u -p -u -r1.313 uvm_map.c
--- uvm_map.c   12 Feb 2012 20:28:14 -0000      1.313
+++ uvm_map.c   18 Feb 2012 18:57:39 -0000
@@ -2246,7 +2246,6 @@ uvm_unmap_remove(struct vm_map *map, vad
                        if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
                                uvm_km_pgremove_intrsafe(map, entry->start,
                                    entry->end);
-                               pmap_kremove(entry->start, len);
                        }
                } else if (UVM_ET_ISOBJ(entry) &&
                           UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {


Home | Main Index | Thread Index | Old Index