Source-Changes-HG archive


[src/trunk]: src/sys/uvm - split uvm_map() into two functions for the followi...



details:   https://anonhg.NetBSD.org/src/rev/ad5cdea058b4
branches:  trunk
changeset: 558151:ad5cdea058b4
user:      yamt <yamt%NetBSD.org@localhost>
date:      Thu Jan 29 12:06:02 2004 +0000

description:
- split uvm_map() into two functions, uvm_map_prepare() and
  uvm_map_enter(), for the following reasons (a sketch of the new
  calling convention follows this description).
- for in-kernel maps, disable map entry merging so that
  unmap operations won't block. (workaround for PR/24039)
- for in-kernel maps, allocate kva for vm_map_entry from
  the map itself, eliminating MAX_KMAPENT and
  uvm_map_entry_kmem_pool (a sketch of the idea follows the
  diff below).
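
To make the new calling convention concrete: callers first compute and
validate a placement with uvm_map_prepare(), then commit it with
uvm_map_enter(), optionally pre-allocating the map entry so that nothing
sleeps for memory while the map is locked.  The sketch below is
illustrative only; the function name is hypothetical, and the
signatures, args.uma_start, the UVM_FLAG_NOMERGE requirement for
pre-allocated entries, and the entry-is-consumed-and-set-to-NULL
convention are inferred from the uvm_km.c and uvm_map.c hunks shown in
the truncated diff.

    /*
     * illustrative sketch only -- not part of this commit.
     * example_fixed_reserve() is a hypothetical caller;
     * uvm_map_prepare(), uvm_map_enter(), struct uvm_map_args and
     * args.uma_start are the interfaces visible in the diff below.
     */
    int
    example_fixed_reserve(struct vm_map *map, vaddr_t *startp, vsize_t size)
    {
            struct uvm_map_args args;
            struct vm_map_entry *entry;
            int error;

            /*
             * allocate the entry up front so that uvm_map_enter()
             * never sleeps for memory while it holds the map locked.
             * pre-allocated entries can't be merged, hence NOMERGE.
             */
            entry = uvm_mapent_alloc(map, 0);
            if (__predict_false(entry == NULL))
                    return ENOMEM;

            /* phase 1: check arguments and choose a placement */
            error = uvm_map_prepare(map, *startp, size, NULL,
                UVM_UNKNOWN_OFFSET, 0,
                UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
                            UVM_ADV_RANDOM,
                            UVM_FLAG_FIXED | UVM_FLAG_NOMERGE),
                &args);

            /* phase 2: lock the map and link the prepared entry in */
            if (!error) {
                    error = uvm_map_enter(map, &args, &entry);
                    if (!error)
                            *startp = args.uma_start;
            }

            /* uvm_map_enter() sets entry to NULL once it consumes it */
            if (entry != NULL)
                    uvm_mapent_free(entry);

            return error;
    }

Compare this with the new body of uvm_map() at the end of the diff,
which follows the same pattern.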

diffstat:

 sys/uvm/uvm.h       |    4 +-
 sys/uvm/uvm_km.c    |   30 ++-
 sys/uvm/uvm_map.c   |  480 +++++++++++++++++++++++++++++++++++++++++----------
 sys/uvm/uvm_map.h   |   35 ++-
 sys/uvm/uvm_map_i.h |    3 +-
 5 files changed, 431 insertions(+), 121 deletions(-)

diffs (truncated from 876 to 300 lines):

diff -r 91000ce80c52 -r ad5cdea058b4 sys/uvm/uvm.h
--- a/sys/uvm/uvm.h     Thu Jan 29 11:17:37 2004 +0000
+++ b/sys/uvm/uvm.h     Thu Jan 29 12:06:02 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm.h,v 1.35 2002/12/01 22:58:43 matt Exp $    */
+/*     $NetBSD: uvm.h,v 1.36 2004/01/29 12:06:02 yamt Exp $    */
 
 /*
  *
@@ -110,8 +110,6 @@
        struct vm_anon *afree;          /* anon free list */
        struct simplelock afreelock;    /* lock on anon free list */
 
-       /* static kernel map entry pool */
-       struct vm_map_entry *kentry_free;       /* free page pool */
        struct simplelock kentry_lock;
 
        /* aio_done is locked by uvm.pagedaemon_lock and splbio! */
diff -r 91000ce80c52 -r ad5cdea058b4 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c  Thu Jan 29 11:17:37 2004 +0000
+++ b/sys/uvm/uvm_km.c  Thu Jan 29 12:06:02 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_km.c,v 1.66 2003/12/18 15:02:04 pk Exp $   */
+/*     $NetBSD: uvm_km.c,v 1.67 2004/01/29 12:06:02 yamt Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -134,7 +134,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.66 2003/12/18 15:02:04 pk Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.67 2004/01/29 12:06:02 yamt Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -155,6 +155,7 @@
  */
 
 static struct vm_map           kernel_map_store;
+static struct vm_map_entry     kernel_first_mapent_store;
 
 /*
  * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
@@ -187,12 +188,27 @@
 
        uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
        kernel_map_store.pmap = pmap_kernel();
-       if (start != base &&
-           uvm_map(&kernel_map_store, &base, start - base, NULL,
-                   UVM_UNKNOWN_OFFSET, 0,
+       if (start != base) {
+               int error;
+               struct uvm_map_args args;
+
+               error = uvm_map_prepare(&kernel_map_store, base, start - base,
+                   NULL, UVM_UNKNOWN_OFFSET, 0,
                    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
-                               UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
-               panic("uvm_km_init: could not reserve space for kernel");
+                               UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
+               if (!error) {
+                       struct vm_map_entry *entry = &kernel_first_mapent_store;
+
+                       kernel_first_mapent_store.flags =
+                           UVM_MAP_KERNEL | UVM_MAP_FIRST;
+                       error = uvm_map_enter(&kernel_map_store, &args, &entry);
+                       KASSERT(entry == NULL);
+               }
+
+               if (error)
+                       panic(
+                           "uvm_km_init: could not reserve space for kernel");
+       }
 
        /*
         * install!
diff -r 91000ce80c52 -r ad5cdea058b4 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Thu Jan 29 11:17:37 2004 +0000
+++ b/sys/uvm/uvm_map.c Thu Jan 29 12:06:02 2004 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_map.c,v 1.152 2003/12/19 06:02:50 simonb Exp $     */
+/*     $NetBSD: uvm_map.c,v 1.153 2004/01/29 12:06:02 yamt Exp $       */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.152 2003/12/19 06:02:50 simonb Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.153 2004/01/29 12:06:02 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -100,8 +100,6 @@
 #include <uvm/uvm_ddb.h>
 #endif
 
-extern struct vm_map *pager_map;
-
 struct uvm_cnt map_ubackmerge, map_uforwmerge;
 struct uvm_cnt map_ubimerge, map_unomerge;
 struct uvm_cnt map_kbackmerge, map_kforwmerge;
@@ -120,7 +118,6 @@
  */
 
 struct pool uvm_map_entry_pool;
-struct pool uvm_map_entry_kmem_pool;
 
 MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
 MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
@@ -141,6 +138,17 @@
  */
 
 /*
+ * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
+ * for the vm_map.
+ *
+ * we exclude pager_map because it needs pager_map_wanted handling
+ * when doing map/unmap.
+ */
+extern struct vm_map *pager_map; /* XXX */
+#define        VM_MAP_USE_KMAPENT(map) \
+       (vm_map_pmap(map) == pmap_kernel() && (map) != pager_map)
+
+/*
  * uvm_map_entry_link: insert entry into a map
  *
  * => map must be locked
@@ -202,6 +210,9 @@
                uvm_mapent_alloc(struct vm_map *, int);
 static void    uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
 static void    uvm_mapent_free(struct vm_map_entry *);
+static struct vm_map_entry *
+               uvm_kmapent_alloc(struct vm_map *, int);
+static void    uvm_kmapent_free(struct vm_map_entry *);
 static void    uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
 static void    uvm_map_reference_amap(struct vm_map_entry *, int);
 static int     uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
@@ -381,29 +392,11 @@
 uvm_mapent_alloc(struct vm_map *map, int flags)
 {
        struct vm_map_entry *me;
-       int s;
        int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
        UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
 
-       if (map->flags & VM_MAP_INTRSAFE || cold) {
-               s = splvm();
-               simple_lock(&uvm.kentry_lock);
-               me = uvm.kentry_free;
-               if (me)
-                       uvm.kentry_free = me->next;
-               simple_unlock(&uvm.kentry_lock);
-               splx(s);
-               if (__predict_false(me == NULL)) {
-                       panic("uvm_mapent_alloc: out of static map entries, "
-                           "check MAX_KMAPENT (currently %d)",
-                           MAX_KMAPENT);
-               }
-               me->flags = UVM_MAP_STATIC;
-       } else if (map == kernel_map) {
-               me = pool_get(&uvm_map_entry_kmem_pool, pflags);
-               if (__predict_false(me == NULL))
-                       return NULL;
-               me->flags = UVM_MAP_KMEM;
+       if (VM_MAP_USE_KMAPENT(map)) {
+               me = uvm_kmapent_alloc(map, flags);
        } else {
                me = pool_get(&uvm_map_entry_pool, pflags);
                if (__predict_false(me == NULL))
@@ -423,20 +416,12 @@
 static __inline void
 uvm_mapent_free(struct vm_map_entry *me)
 {
-       int s;
        UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
 
        UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
                me, me->flags, 0, 0);
-       if (me->flags & UVM_MAP_STATIC) {
-               s = splvm();
-               simple_lock(&uvm.kentry_lock);
-               me->next = uvm.kentry_free;
-               uvm.kentry_free = me;
-               simple_unlock(&uvm.kentry_lock);
-               splx(s);
-       } else if (me->flags & UVM_MAP_KMEM) {
-               pool_put(&uvm_map_entry_kmem_pool, me);
+       if (me->flags & UVM_MAP_KERNEL) {
+               uvm_kmapent_free(me);
        } else {
                pool_put(&uvm_map_entry_pool, me);
        }
@@ -501,12 +486,10 @@
 void
 uvm_map_init(void)
 {
-       static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
 #if defined(UVMHIST)
        static struct uvm_history_ent maphistbuf[100];
        static struct uvm_history_ent pdhistbuf[100];
 #endif
-       int lcv;
 
        /*
         * first, init logging system.
@@ -542,15 +525,12 @@
        UVMCNT_INIT(uvm_mlk_hint, UVMCNT_CNT, 0, "# map lookup hint hits", 0);
 
        /*
-        * now set up static pool of kernel map entrys ...
+        * initialize the global lock for kernel map entries.
+        *
+        * XXX is it worth having a per-map lock instead?
         */
 
        simple_lock_init(&uvm.kentry_lock);
-       uvm.kentry_free = NULL;
-       for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
-               kernel_map_entry[lcv].next = uvm.kentry_free;
-               uvm.kentry_free = &kernel_map_entry[lcv];
-       }
 
        /*
         * initialize the map-related pools.
@@ -559,8 +539,6 @@
            0, 0, 0, "vmsppl", &pool_allocator_nointr);
        pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
            0, 0, 0, "vmmpepl", &pool_allocator_nointr);
-       pool_init(&uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry),
-           0, 0, 0, "vmmpekpl", NULL);
 }
 
 /*
@@ -585,6 +563,8 @@
 
        /* uvm_map_simplify_entry(map, entry); */ /* XXX */
 
+       KASSERT((entry->flags & UVM_MAP_KERNEL) == 0);
+
        uvm_tree_sanity(map, "clip_start entry");
 
        /*
@@ -639,6 +619,8 @@
        struct vm_map_entry *   new_entry;
        vaddr_t new_adj; /* #bytes we move start forward */
 
+       KASSERT((entry->flags & UVM_MAP_KERNEL) == 0);
+
        uvm_tree_sanity(map, "clip_end entry");
        /*
         *      Create a new entry and insert it
@@ -708,19 +690,54 @@
 uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
 {
-       struct vm_map_entry *prev_entry, *new_entry;
-       const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
-           AMAP_EXTEND_NOWAIT : 0;
-       vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
-           UVM_MAXPROTECTION(flags);
-       vm_inherit_t inherit = UVM_INHERIT(flags);
-       int advice = UVM_ADVICE(flags);
-       int error, merged = 0, kmap = (vm_map_pmap(map) == pmap_kernel());
-       UVMHIST_FUNC("uvm_map");
+       struct uvm_map_args args;
+       struct vm_map_entry *new_entry;
+       int error;
+
+       /*
+        * for pager_map, allocate the new entry first to avoid sleeping
+        * for memory while we have the map locked.
+        *
+        * besides, because we allocate entries for in-kernel maps
+        * a bit differently (cf. uvm_kmapent_alloc/free), we need to
+        * allocate them before locking the map.
+        */
+
+       new_entry = NULL;
+       if (VM_MAP_USE_KMAPENT(map) || map == pager_map) {
+               flags |= UVM_FLAG_NOMERGE;
+               new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
+               if (__predict_false(new_entry == NULL))
+                       return ENOMEM;
+       }
+
+       error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
+           flags, &args);
+       if (!error) {
+               error = uvm_map_enter(map, &args, &new_entry);
+               *startp = args.uma_start;
+       }
+
+       if (new_entry)
+               uvm_mapent_free(new_entry);
+
+       return error;
+}
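
The bodies of uvm_kmapent_alloc() and uvm_kmapent_free() fall outside
the 300 lines shown above, but the idea behind the third bullet of the
description is straightforward: rather than drawing from a fixed
MAX_KMAPENT array, an in-kernel map carves a page of its own kva into
vm_map_entry-sized pieces and threads a freelist through them.  Below
is a minimal, self-contained userland sketch of that allocation
pattern; every name in it is hypothetical, and the committed code
additionally handles locking and takes the backing page from the very
map being modified.

    /*
     * userland sketch of the "carve a chunk into entries and keep a
     * freelist" pattern.  hypothetical names throughout; the real
     * uvm_kmapent_alloc()/free() also deal with locking and obtain
     * the backing page from the map itself.
     */
    #include <stddef.h>
    #include <stdlib.h>

    #define CHUNK_SIZE 4096             /* stand-in for one page of kva */

    struct fake_entry {
            struct fake_entry *next;    /* freelist link while free */
            char payload[56];           /* stand-in for vm_map_entry fields */
    };

    static struct fake_entry *freelist;

    static struct fake_entry *
    fake_kmapent_alloc(void)
    {
            struct fake_entry *e;

            if (freelist == NULL) {
                    /* refill: slice a fresh chunk into entries */
                    char *chunk = malloc(CHUNK_SIZE);
                    size_t i, n = CHUNK_SIZE / sizeof(struct fake_entry);

                    if (chunk == NULL)
                            return NULL;
                    for (i = 0; i < n; i++) {
                            e = (struct fake_entry *)
                                (chunk + i * sizeof(struct fake_entry));
                            e->next = freelist;
                            freelist = e;
                    }
            }
            e = freelist;
            freelist = e->next;
            return e;
    }

    static void
    fake_kmapent_free(struct fake_entry *e)
    {
            e->next = freelist;
            freelist = e;
    }

    int
    main(void)
    {
            struct fake_entry *e;

            e = fake_kmapent_alloc();   /* triggers a chunk refill */
            if (e != NULL)
                    fake_kmapent_free(e);
            return 0;
    }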


