Source-Changes-HG archive


[src/trunk]: src/sys/uvm create a new pool for map entries, allocated from km...



details:   https://anonhg.NetBSD.org/src/rev/a2dd3e83cd54
branches:  trunk
changeset: 514727:a2dd3e83cd54
user:      chs <chs@NetBSD.org>
date:      Sun Sep 09 19:38:22 2001 +0000

description:
Create a new pool for map entries, allocated from kmem_map instead of
kernel_map.  Use this pool instead of the static map entries when
allocating map entries for kernel_map.  This greatly reduces the number
of static map entries used and should eliminate the problem of running
out.
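
For context, the crux is which VM map backs each pool's pages.  Below
is a minimal sketch of the two pool_init() calls from the diff that
follows, with editorial comments added; the comments are not in the
source, and the reading of kmem_map as an interrupt-safe submap is an
assumption based on the allocation logic in this diff, not something
the commit states:

    /*
     * General-purpose map entries: backing pages come from kernel_map
     * through the non-interrupt-safe page allocator.  Fine for user
     * maps, but getting a fresh page this way can itself require a new
     * kernel_map entry, i.e. it can recurse into kernel_map.
     */
    pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
        0, 0, 0, "vmmpepl", 0,
        pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);

    /*
     * Entries for kernel_map itself: passing NULL allocators selects
     * pool(9)'s default page backend, which hands out pages from
     * kmem_map instead.  An interrupt-safe map's own entries come from
     * the static list (see uvm_mapent_alloc below), so the recursion
     * through kernel_map is broken.
     */
    pool_init(&uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry),
        0, 0, 0, "vmmpekpl", 0, NULL, NULL, M_VMMAP);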

diffstat:

 sys/uvm/uvm_map.c |  58 ++++++++++++++++++++++++++++--------------------------
 sys/uvm/uvm_map.h |   3 +-
 2 files changed, 32 insertions(+), 29 deletions(-)

diffs (144 lines):

diff -r 347d425a772e -r a2dd3e83cd54 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Sun Sep 09 18:36:36 2001 +0000
+++ b/sys/uvm/uvm_map.c Sun Sep 09 19:38:22 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.103 2001/09/07 00:50:54 lukem Exp $      */
+/*     $NetBSD: uvm_map.c,v 1.104 2001/09/09 19:38:22 chs Exp $        */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -80,6 +80,7 @@
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
+#include <sys/kernel.h>
 
 #ifdef SYSVSHM
 #include <sys/shm.h>
@@ -108,6 +109,7 @@
  */
 
 struct pool uvm_map_entry_pool;
+struct pool uvm_map_entry_kmem_pool;
 
 #ifdef PMAP_GROWKERNEL
 /*
@@ -192,8 +194,6 @@
 
 /*
  * uvm_mapent_alloc: allocate a map entry
- *
- * => XXX: static pool for kernel map?
  */
 
 static __inline struct vm_map_entry *
@@ -202,38 +202,36 @@
 {
        struct vm_map_entry *me;
        int s;
-       UVMHIST_FUNC("uvm_mapent_alloc");
-       UVMHIST_CALLED(maphist);
-
-       if ((map->flags & VM_MAP_INTRSAFE) == 0 &&
-           map != kernel_map && kernel_map != NULL /* XXX */) {
-               me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
-               me->flags = 0;
-               /* me can't be null, wait ok */
-       } else {
-               s = splvm();    /* protect kentry_free list with splvm */
+       UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
+
+       if (map->flags & VM_MAP_INTRSAFE || cold) {
+               s = splvm();
                simple_lock(&uvm.kentry_lock);
                me = uvm.kentry_free;
                if (me) uvm.kentry_free = me->next;
                simple_unlock(&uvm.kentry_lock);
                splx(s);
-               if (!me)
-                       panic(
-    "mapent_alloc: out of static map entries, check MAX_KMAPENT (currently %d)",
-       MAX_KMAPENT);
+               if (me == NULL) {
+                       panic("uvm_mapent_alloc: out of static map entries, "
+                             "check MAX_KMAPENT (currently %d)",
+                             MAX_KMAPENT);
+               }
                me->flags = UVM_MAP_STATIC;
+       } else if (map == kernel_map) {
+               me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
+               me->flags = UVM_MAP_KMEM;
+       } else {
+               me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
+               me->flags = 0;
        }
 
-       UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]",
-               me, ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map)
-               ? TRUE : FALSE, 0, 0);
+       UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
+           ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
        return(me);
 }
 
 /*
  * uvm_mapent_free: free map entry
- *
- * => XXX: static pool for kernel map?
  */
 
 static __inline void
@@ -241,19 +239,21 @@
        struct vm_map_entry *me;
 {
        int s;
-       UVMHIST_FUNC("uvm_mapent_free");
-       UVMHIST_CALLED(maphist);
+       UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
+
        UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
                me, me->flags, 0, 0);
-       if ((me->flags & UVM_MAP_STATIC) == 0) {
-               pool_put(&uvm_map_entry_pool, me);
-       } else {
-               s = splvm();    /* protect kentry_free list with splvm */
+       if (me->flags & UVM_MAP_STATIC) {
+               s = splvm();
                simple_lock(&uvm.kentry_lock);
                me->next = uvm.kentry_free;
                uvm.kentry_free = me;
                simple_unlock(&uvm.kentry_lock);
                splx(s);
+       } else if (me->flags & UVM_MAP_KMEM) {
+               pool_put(&uvm_map_entry_kmem_pool, me);
+       } else {
+               pool_put(&uvm_map_entry_pool, me);
        }
 }
 
@@ -364,6 +364,8 @@
        pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
            0, 0, 0, "vmmpepl", 0,
            pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
+       pool_init(&uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry),
+           0, 0, 0, "vmmpekpl", 0, NULL, NULL, M_VMMAP);
 }
 
 /*
diff -r 347d425a772e -r a2dd3e83cd54 sys/uvm/uvm_map.h
--- a/sys/uvm/uvm_map.h Sun Sep 09 18:36:36 2001 +0000
+++ b/sys/uvm/uvm_map.h Sun Sep 09 19:38:22 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.h,v 1.29 2001/06/26 17:55:15 thorpej Exp $     */
+/*     $NetBSD: uvm_map.h,v 1.30 2001/09/09 19:38:23 chs Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -138,6 +138,7 @@
        u_int8_t                flags;          /* flags */
 
 #define UVM_MAP_STATIC         0x01            /* static map entry */
+#define UVM_MAP_KMEM           0x02            /* from kmem entry pool */
 
 };
 


