tech-kern archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
Re: UVM patch for PR port-xen/45975
On Sat, Feb 18, 2012 at 07:15:26PM +0000, Mindaugas Rasiukevicius wrote:
> Yes. Also, one should be careful about a potential race condition - that is,
> TLB invalidations must happen *before* VA becomes available for use by other
> processes. Otherwise, memory might be allocated with that VA range and used
> while TLB caches have stale entries i.e. pointing to a wrong physical page.
>
> > - the calls to pmap_update() should be moved along with the calls to
> > pmap_kremove().
>
> Due to deferred TLB invalidations (or flush), we want to call pmap_update()
> at the point when all pmap_kremove() calls weere "collected".
Ok, so here's an updated patch:
--
Manuel Bouyer <bouyer%antioche.eu.org@localhost>
NetBSD: 26 ans d'experience feront toujours la difference
--
Index: uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.120
diff -u -p -u -r1.120 uvm_km.c
--- uvm_km.c 10 Feb 2012 17:35:47 -0000 1.120
+++ uvm_km.c 18 Feb 2012 19:47:20 -0000
@@ -459,8 +459,12 @@ uvm_km_pgremove(vaddr_t startva, vaddr_t
void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
+#define __PGRM_BATCH 16
struct vm_page *pg;
- paddr_t pa;
+ paddr_t pa[__PGRM_BATCH];
+ int npgrm, i;
+ vaddr_t va, batch_vastart;
+
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(VM_MAP_IS_KERNEL(map));
@@ -468,16 +472,31 @@ uvm_km_pgremove_intrsafe(struct vm_map *
KASSERT(start < end);
KASSERT(end <= vm_map_max(map));
- for (; start < end; start += PAGE_SIZE) {
- if (!pmap_extract(pmap_kernel(), start, &pa)) {
- continue;
+ for (va = start; va < end;) {
+ batch_vastart = va;
+ /* create a batch of at most __PGRM_BATCH pages to free */
+ for (i = 0;
+ i < __PGRM_BATCH && va < end;
+ va += PAGE_SIZE) {
+ if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
+ continue;
+ }
+ i++;
+ }
+ npgrm = i;
+ /* now remove the mappings */
+ pmap_kremove(batch_vastart, PAGE_SIZE * npgrm);
+ pmap_update(pmap_kernel());
+ /* and free the pages */
+ for (i = 0; i < npgrm; i++) {
+ pg = PHYS_TO_VM_PAGE(pa[i]);
+ KASSERT(pg);
+ KASSERT(pg->uobject == NULL && pg->uanon == NULL);
+ KASSERT((pg->flags & PG_BUSY) == 0);
+ uvm_pagefree(pg);
}
- pg = PHYS_TO_VM_PAGE(pa);
- KASSERT(pg);
- KASSERT(pg->uobject == NULL && pg->uanon == NULL);
- KASSERT((pg->flags & PG_BUSY) == 0);
- uvm_pagefree(pg);
}
+#undef __PGRM_BATCH
}
#if defined(DEBUG)
@@ -670,7 +689,6 @@ uvm_km_free(struct vm_map *map, vaddr_t
* remove it after. See comment below about KVA visibility.
*/
uvm_km_pgremove_intrsafe(map, addr, addr + size);
- pmap_kremove(addr, size);
}
/*
@@ -747,7 +765,6 @@ again:
} else {
uvm_km_pgremove_intrsafe(kernel_map, va,
va + size);
- pmap_kremove(va, size);
vmem_free(kmem_va_arena, va, size);
return ENOMEM;
}
@@ -783,8 +800,6 @@ uvm_km_kmem_free(vmem_t *vm, vmem_addr_t
}
#endif /* PMAP_UNMAP_POOLPAGE */
uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
- pmap_kremove(addr, size);
- pmap_update(pmap_kernel());
vmem_free(vm, addr, size);
}
Index: uvm_kmguard.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_kmguard.c,v
retrieving revision 1.9
diff -u -p -u -r1.9 uvm_kmguard.c
--- uvm_kmguard.c 5 Feb 2012 11:08:06 -0000 1.9
+++ uvm_kmguard.c 18 Feb 2012 19:47:20 -0000
@@ -180,8 +180,6 @@ uvm_kmguard_free(struct uvm_kmguard *kg,
*/
uvm_km_pgremove_intrsafe(kernel_map, va, va + PAGE_SIZE * 2);
- pmap_kremove(va, PAGE_SIZE * 2);
- pmap_update(pmap_kernel());
/*
* put the VA allocation into the list and swap an old one
Index: uvm_map.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
retrieving revision 1.313
diff -u -p -u -r1.313 uvm_map.c
--- uvm_map.c 12 Feb 2012 20:28:14 -0000 1.313
+++ uvm_map.c 18 Feb 2012 19:47:20 -0000
@@ -2246,7 +2246,6 @@ uvm_unmap_remove(struct vm_map *map, vad
if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
uvm_km_pgremove_intrsafe(map, entry->start,
entry->end);
- pmap_kremove(entry->start, len);
}
} else if (UVM_ET_ISOBJ(entry) &&
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
Home |
Main Index |
Thread Index |
Old Index