Source-Changes-HG archive
[src/trunk]: src/sys allocate uareas and buffers from kernel_map again
details: https://anonhg.NetBSD.org/src/rev/7f3bfe2869b2
branches: trunk
changeset: 773432:7f3bfe2869b2
user: para <para%NetBSD.org@localhost>
date: Wed Feb 01 23:43:49 2012 +0000
description:
allocate uareas and buffers from kernel_map again
add code to drain pools if kmem_arena runs out of space
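In outline: buffer and uarea pool pages go back to buf_map/kernel_map, kmem_arena is sized down, and the pagedaemon treats kernel-VA starvation like a page shortage, so a kmem allocation that blocks for space can have pools drained on its behalf. A minimal sketch of that control flow follows; uvm_km_va_starved_p() is the real predicate used in the hunks below, while page_shortage_p(), sleep_until_kicked() and reclaim_pass() are placeholders for the pagedaemon's existing logic:

static void
pagedaemon_outline(void)
{
	for (;;) {
		/* new: kernel-VA starvation counts as memory pressure */
		bool kmem_va_starved = uvm_km_va_starved_p();

		if (!page_shortage_p() && !kmem_va_starved) {
			/* idle until uvm_kick_pdaemon() wakes us */
			sleep_until_kicked();
			continue;
		}
		/* scan pages and drain idle pool pages back to the arena */
		reclaim_pass();
	}
}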
diffstat:
sys/kern/subr_vmem.c | 9 +++++++--
sys/kern/vfs_bio.c | 19 ++++++++-----------
sys/uvm/uvm_glue.c | 39 ++++++++++++++++++++++++++++-----------
sys/uvm/uvm_km.c | 8 ++++----
sys/uvm/uvm_pdaemon.c | 16 ++++++++++------
5 files changed, 57 insertions(+), 34 deletions(-)
diffs (237 lines):
diff -r 8e4c648210a2 -r 7f3bfe2869b2 sys/kern/subr_vmem.c
--- a/sys/kern/subr_vmem.c Wed Feb 01 23:03:30 2012 +0000
+++ b/sys/kern/subr_vmem.c Wed Feb 01 23:43:49 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_vmem.c,v 1.70 2012/01/30 17:35:18 para Exp $ */
+/* $NetBSD: subr_vmem.c,v 1.71 2012/02/01 23:43:49 para Exp $ */
/*-
* Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
@@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.70 2012/01/30 17:35:18 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.71 2012/02/01 23:43:49 para Exp $");
#if defined(_KERNEL)
#include "opt_ddb.h"
@@ -1245,6 +1245,11 @@
/* XXX */
if ((flags & VM_SLEEP) != 0) {
+#if defined(_KERNEL) && !defined(_RUMPKERNEL)
+ mutex_spin_enter(&uvm_fpageqlock);
+ uvm_kick_pdaemon();
+ mutex_spin_exit(&uvm_fpageqlock);
+#endif
VMEM_LOCK(vm);
VMEM_CONDVAR_WAIT(vm);
VMEM_UNLOCK(vm);
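The hunk above makes a VM_SLEEP allocation kick the pagedaemon before blocking on the arena's condvar, so something is actually working on freeing kernel VA (by draining pools, among other things) while the caller waits; the #if presumably exists because these UVM internals are not available in the same form inside rump kernels.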
diff -r 8e4c648210a2 -r 7f3bfe2869b2 sys/kern/vfs_bio.c
--- a/sys/kern/vfs_bio.c Wed Feb 01 23:03:30 2012 +0000
+++ b/sys/kern/vfs_bio.c Wed Feb 01 23:43:49 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_bio.c,v 1.235 2012/01/28 00:00:06 rmind Exp $ */
+/* $NetBSD: vfs_bio.c,v 1.236 2012/02/01 23:43:49 para Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -123,7 +123,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.235 2012/01/28 00:00:06 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.236 2012/02/01 23:43:49 para Exp $");
#include "opt_bufcache.h"
@@ -231,21 +231,18 @@
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{
- const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
- vmem_addr_t va;
- int ret;
- ret = uvm_km_kmem_alloc(kmem_va_arena, MAXBSIZE,
- vflags | VM_INSTANTFIT, &va);
-
- return ret ? NULL : (void *)va;
+ return (void *)uvm_km_alloc(buf_map,
+ MAXBSIZE, MAXBSIZE,
+ ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT|UVM_KMF_TRYLOCK)
+ | UVM_KMF_WIRED);
}
static void
bufpool_page_free(struct pool *pp, void *v)
{
- uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, MAXBSIZE);
+ uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
}
static struct pool_allocator bufmempool_allocator = {
@@ -491,7 +488,7 @@
pa = (size <= PAGE_SIZE && use_std)
? &pool_allocator_nointr
: &bufmempool_allocator;
- pool_init(pp, size, 0, 0, PR_NOALIGN, name, pa, IPL_NONE);
+ pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE);
pool_setlowat(pp, 1);
pool_sethiwat(pp, 1);
}
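Buffer pool pages therefore come from the dedicated buf_map again, as wired MAXBSIZE chunks, instead of from kmem_va_arena; and since uvm_km_alloc() is asked for MAXBSIZE alignment, the PR_NOALIGN flag can be dropped from pool_init(). A sketch of how such a back-end plugs into a pool follows; the initializer mirrors the functions in the hunk above, but the actual contents of bufmempool_allocator are outside this diff:

static struct pool_allocator example_bufmempool_allocator = {
	.pa_alloc  = bufpool_page_alloc,	/* wired MAXBSIZE chunk from buf_map */
	.pa_free   = bufpool_page_free,		/* chunk returned to buf_map */
	.pa_pagesz = MAXBSIZE,			/* pool "page" size for this back-end */
};

/*
 * A buffer pool is then set up as in the hunk above:
 *	pool_init(pp, size, 0, 0, 0, name, &example_bufmempool_allocator, IPL_NONE);
 */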
diff -r 8e4c648210a2 -r 7f3bfe2869b2 sys/uvm/uvm_glue.c
--- a/sys/uvm/uvm_glue.c Wed Feb 01 23:03:30 2012 +0000
+++ b/sys/uvm/uvm_glue.c Wed Feb 01 23:43:49 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_glue.c,v 1.153 2012/01/27 19:48:41 para Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.154 2012/02/01 23:43:49 para Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.153 2012/01/27 19:48:41 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.154 2012/02/01 23:43:49 para Exp $");
#include "opt_kgdb.h"
#include "opt_kstack.h"
@@ -240,15 +240,26 @@
static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{
- if (USPACE_ALIGN == 0) {
- int rc;
- vmem_addr_t va;
+#if defined(PMAP_MAP_POOLPAGE)
+ if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
+ struct vm_page *pg;
+ vaddr_t va;
- rc = uvm_km_kmem_alloc(kmem_va_arena, USPACE,
- ((flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP) |
- VM_INSTANTFIT, &va);
- return (rc != 0) ? NULL : (void *)va;
+#if defined(PMAP_ALLOC_POOLPAGE)
+ pg = PMAP_ALLOC_POOLPAGE(
+ ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
+#else
+ pg = uvm_pagealloc(NULL, 0, NULL,
+ ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
+#endif
+ if (pg == NULL)
+ return NULL;
+ va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
+ if (va == 0)
+ uvm_pagefree(pg);
+ return (void *)va;
}
+#endif
#if defined(__HAVE_CPU_UAREA_ROUTINES)
void *va = cpu_uarea_alloc(false);
if (va)
@@ -263,10 +274,16 @@
static void
uarea_poolpage_free(struct pool *pp, void *addr)
{
- if (USPACE_ALIGN == 0) {
- uvm_km_kmem_free(kmem_va_arena, (vmem_addr_t)addr, USPACE);
+#if defined(PMAP_MAP_POOLPAGE)
+ if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
+ paddr_t pa;
+
+ pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
+ KASSERT(pa != 0);
+ uvm_pagefree(PHYS_TO_VM_PAGE(pa));
return;
}
+#endif
#if defined(__HAVE_CPU_UAREA_ROUTINES)
if (cpu_uarea_free(addr))
return;
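When a uarea is exactly one page and needs no special alignment, it can now be backed by a single physical page mapped via PMAP_MAP_POOLPAGE() (typically the pmap's direct map), consuming no kernel_map or kmem_arena virtual address space at all. If PMAP_MAP_POOLPAGE() yields 0, the page is freed and the alloc path falls through to return (void *)0, i.e. NULL. Multi-page or aligned uareas take the remaining paths in these functions, which per the commit message now allocate from kernel_map.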
diff -r 8e4c648210a2 -r 7f3bfe2869b2 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c Wed Feb 01 23:03:30 2012 +0000
+++ b/sys/uvm/uvm_km.c Wed Feb 01 23:43:49 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.115 2012/02/01 02:22:27 matt Exp $ */
+/* $NetBSD: uvm_km.c,v 1.116 2012/02/01 23:43:49 para Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -120,7 +120,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.115 2012/02/01 02:22:27 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.116 2012/02/01 23:43:49 para Exp $");
#include "opt_uvmhist.h"
@@ -167,8 +167,8 @@
{
vaddr_t base = VM_MIN_KERNEL_ADDRESS;
- kmemsize = MIN(((((vsize_t)(end - start)) / 3) * 2),
- ((((vsize_t)uvmexp.npages) * PAGE_SIZE)));
+ kmemsize = MIN((((vsize_t)(end - start)) / 3),
+ ((((vsize_t)uvmexp.npages) * PAGE_SIZE) / 2));
kmemsize = round_page(kmemsize);
/*
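The sizing hunk halves kmem_arena: its bound drops from min(two thirds of the kernel VA range, all of physical memory) to min(one third of the VA range, half of physical memory). As a purely illustrative example, with 1 GiB of kernel VA between start and end and 512 MiB of RAM, the old formula permitted min(~682 MiB, 512 MiB) = 512 MiB of kmem VA, while the new one permits min(~341 MiB, 256 MiB) = 256 MiB, leaving the rest of the range for kernel_map and its submaps (such as buf_map), where the uareas and buffer pages now live.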
diff -r 8e4c648210a2 -r 7f3bfe2869b2 sys/uvm/uvm_pdaemon.c
--- a/sys/uvm/uvm_pdaemon.c Wed Feb 01 23:03:30 2012 +0000
+++ b/sys/uvm/uvm_pdaemon.c Wed Feb 01 23:43:49 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdaemon.c,v 1.104 2012/01/27 19:48:42 para Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.105 2012/02/01 23:43:49 para Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.104 2012/01/27 19:48:42 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.105 2012/02/01 23:43:49 para Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@@ -173,7 +173,8 @@
if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
(uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
- uvmpdpol_needsscan_p())) {
+ uvmpdpol_needsscan_p()) ||
+ uvm_km_va_starved_p()) {
wakeup(&uvm.pagedaemon);
}
}
@@ -248,10 +249,13 @@
*/
for (;;) {
- bool needsscan, needsfree;
+ bool needsscan, needsfree, kmem_va_starved;
+
+ kmem_va_starved = uvm_km_va_starved_p();
mutex_spin_enter(&uvm_fpageqlock);
- if (uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) {
+ if ((uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) &&
+ !kmem_va_starved) {
UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0);
UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
&uvm_fpageqlock, false, "pgdaemon", 0);
@@ -320,7 +324,7 @@
* if we don't need free memory, we're done.
*/
- if (!needsfree && !uvm_km_va_starved_p())
+ if (!needsfree && !kmem_va_starved)
continue;
/*
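In uvm_pdaemon.c the first hunk extends the wakeup condition: kernel-VA starvation now triggers wakeup(&uvm.pagedaemon) alongside a free-page shortfall. The later hunks make the main pageout loop sample uvm_km_va_starved_p() once per iteration and consult the cached flag both when deciding whether to go back to sleep and when deciding whether the free/drain phase can be skipped.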