Source-Changes-HG archive


[src/trunk]: src/sys make vmem(9) ready to be used early during bootstrap to ...



details:   https://anonhg.NetBSD.org/src/rev/a87f345b846c
branches:  trunk
changeset: 784345:a87f345b846c
user:      para <para%NetBSD.org@localhost>
date:      Sat Jan 26 13:50:33 2013 +0000

description:
make vmem(9) ready to be used early during bootstrap to replace extent(9).
pass memory for vmem structs into the initialization functions and
do away with the static pools for this.
factor out the vmem internal structures into a private header.
remove special bootstrapping of the kmem_va_arena as all necessary memory
comes from pool_allocator_meta, which is fully operational at this point.
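
The central interface change, passing caller-supplied memory for the vmem
structs into the initialization functions, follows the pattern visible in
the hunks below, where kmem_va_meta_arena_store is a static struct vmem
handed to the arena setup.  A minimal sketch of that pattern is given
here; the vmem_init() name and argument order are assumed to mirror
vmem_create() with a leading storage argument, since the new prototype
itself lies outside the truncated diff.

    /*
     * Illustrative sketch only, not code from this commit: an arena
     * needed early in bootstrap draws its struct vmem from storage the
     * caller provides, instead of from a static pool in subr_vmem.c.
     */
    #include <sys/vmem.h>
    #include <sys/vmem_impl.h>  /* struct vmem, now in the private header */

    static struct vmem example_arena_store;     /* caller-supplied storage */
    vmem_t *example_arena;

    void
    example_bootstrap(vmem_addr_t base, vmem_size_t size)
    {

            /* Flag and ipl values are illustrative. */
            example_arena = vmem_init(&example_arena_store, "example",
                base, size, PAGE_SIZE,          /* quantum */
                NULL, NULL, NULL,               /* no import/release/source */
                0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    }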

diffstat:

 sys/kern/subr_vmem.c           |  236 ++++++++++------------------------------
 sys/rump/librump/rumpkern/vm.c |    7 +-
 sys/sys/vmem.h                 |    8 +-
 sys/sys/vmem_impl.h            |  138 +++++++++++++++++++++++
 sys/uvm/uvm_km.c               |   15 +-
 5 files changed, 214 insertions(+), 190 deletions(-)

diffs (truncated from 698 to 300 lines):

diff -r a526e8e48c86 -r a87f345b846c sys/kern/subr_vmem.c
--- a/sys/kern/subr_vmem.c      Sat Jan 26 11:58:43 2013 +0000
+++ b/sys/kern/subr_vmem.c      Sat Jan 26 13:50:33 2013 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: subr_vmem.c,v 1.77 2013/01/04 08:28:38 para Exp $      */
+/*     $NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $      */
 
 /*-
  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.77 2013/01/04 08:28:38 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $");
 
 #if defined(_KERNEL)
 #include "opt_ddb.h"
@@ -53,6 +53,7 @@
 #include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/vmem.h>
+#include <sys/vmem_impl.h>
 #include <sys/workqueue.h>
 #include <sys/atomic.h>
 #include <uvm/uvm.h>
@@ -61,7 +62,13 @@
 #include <uvm/uvm_page.h>
 #include <uvm/uvm_pdaemon.h>
 #else /* defined(_KERNEL) */
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
 #include "../sys/vmem.h"
+#include "../sys/vmem_impl.h"
 #endif /* defined(_KERNEL) */
 
 
@@ -78,28 +85,23 @@
 VMEM_EVCNT_DEFINE(bt_count)
 VMEM_EVCNT_DEFINE(bt_inuse)
 
-#define        LOCK_DECL(name)         \
-    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
-
-#define CONDVAR_DECL(name)     \
-    kcondvar_t name
+#define        VMEM_CONDVAR_INIT(vm, wchan)    cv_init(&vm->vm_cv, wchan)
+#define        VMEM_CONDVAR_DESTROY(vm)        cv_destroy(&vm->vm_cv)
+#define        VMEM_CONDVAR_WAIT(vm)           cv_wait(&vm->vm_cv, &vm->vm_lock)
+#define        VMEM_CONDVAR_BROADCAST(vm)      cv_broadcast(&vm->vm_cv)
 
 #else /* defined(_KERNEL) */
-#include <stdio.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
 
 #define VMEM_EVCNT_INCR(ev)    /* nothing */
 #define VMEM_EVCNT_DECR(ev)    /* nothing */
 
+#define        VMEM_CONDVAR_INIT(vm, wchan)    /* nothing */
+#define        VMEM_CONDVAR_DESTROY(vm)        /* nothing */
+#define        VMEM_CONDVAR_WAIT(vm)           /* nothing */
+#define        VMEM_CONDVAR_BROADCAST(vm)      /* nothing */
+
 #define        UNITTEST
 #define        KASSERT(a)              assert(a)
-#define        LOCK_DECL(name)         /* nothing */
-#define        CONDVAR_DECL(name)      /* nothing */
-#define        VMEM_CONDVAR_INIT(vm, wchan)    /* nothing */
-#define        VMEM_CONDVAR_BROADCAST(vm)      /* nothing */
 #define        mutex_init(a, b, c)     /* nothing */
 #define        mutex_destroy(a)        /* nothing */
 #define        mutex_enter(a)          /* nothing */
@@ -110,74 +112,25 @@
 #define        panic(...)              printf(__VA_ARGS__); abort()
 #endif /* defined(_KERNEL) */
 
-struct vmem;
-struct vmem_btag;
-
 #if defined(VMEM_SANITY)
 static void vmem_check(vmem_t *);
 #else /* defined(VMEM_SANITY) */
 #define vmem_check(vm) /* nothing */
 #endif /* defined(VMEM_SANITY) */
 
-#define        VMEM_MAXORDER           (sizeof(vmem_size_t) * CHAR_BIT)
-
 #define        VMEM_HASHSIZE_MIN       1       /* XXX */
 #define        VMEM_HASHSIZE_MAX       65536   /* XXX */
 #define        VMEM_HASHSIZE_INIT      1
 
 #define        VM_FITMASK      (VM_BESTFIT | VM_INSTANTFIT)
 
-CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
-LIST_HEAD(vmem_freelist, vmem_btag);
-LIST_HEAD(vmem_hashlist, vmem_btag);
-
-#if defined(QCACHE)
-#define        VMEM_QCACHE_IDX_MAX     32
-
-#define        QC_NAME_MAX     16
-
-struct qcache {
-       pool_cache_t qc_cache;
-       vmem_t *qc_vmem;
-       char qc_name[QC_NAME_MAX];
-};
-typedef struct qcache qcache_t;
-#define        QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
-#endif /* defined(QCACHE) */
-
-#define        VMEM_NAME_MAX   16
+#if defined(_KERNEL)
+static bool vmem_bootstrapped = false;
+static kmutex_t vmem_list_lock;
+static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
+#endif /* defined(_KERNEL) */
 
-/* vmem arena */
-struct vmem {
-       CONDVAR_DECL(vm_cv);
-       LOCK_DECL(vm_lock);
-       vm_flag_t vm_flags;
-       vmem_import_t *vm_importfn;
-       vmem_release_t *vm_releasefn;
-       size_t vm_nfreetags;
-       LIST_HEAD(, vmem_btag) vm_freetags;
-       void *vm_arg;
-       struct vmem_seglist vm_seglist;
-       struct vmem_freelist vm_freelist[VMEM_MAXORDER];
-       size_t vm_hashsize;
-       size_t vm_nbusytag;
-       struct vmem_hashlist *vm_hashlist;
-       struct vmem_hashlist vm_hash0;
-       size_t vm_quantum_mask;
-       int vm_quantum_shift;
-       size_t vm_size;
-       size_t vm_inuse;
-       char vm_name[VMEM_NAME_MAX+1];
-       LIST_ENTRY(vmem) vm_alllist;
-
-#if defined(QCACHE)
-       /* quantum cache */
-       size_t vm_qcache_max;
-       struct pool_allocator vm_qcache_allocator;
-       qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
-       qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
-#endif /* defined(QCACHE) */
-};
+/* ---- misc */
 
 #define        VMEM_LOCK(vm)           mutex_enter(&vm->vm_lock)
 #define        VMEM_TRYLOCK(vm)        mutex_tryenter(&vm->vm_lock)
@@ -186,44 +139,6 @@
 #define        VMEM_LOCK_DESTROY(vm)   mutex_destroy(&vm->vm_lock)
 #define        VMEM_ASSERT_LOCKED(vm)  KASSERT(mutex_owned(&vm->vm_lock))
 
-#if defined(_KERNEL)
-#define        VMEM_CONDVAR_INIT(vm, wchan)    cv_init(&vm->vm_cv, wchan)
-#define        VMEM_CONDVAR_DESTROY(vm)        cv_destroy(&vm->vm_cv)
-#define        VMEM_CONDVAR_WAIT(vm)           cv_wait(&vm->vm_cv, &vm->vm_lock)
-#define        VMEM_CONDVAR_BROADCAST(vm)      cv_broadcast(&vm->vm_cv)
-#endif /* defined(_KERNEL) */
-
-/* boundary tag */
-struct vmem_btag {
-       CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
-       union {
-               LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
-               LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
-       } bt_u;
-#define        bt_hashlist     bt_u.u_hashlist
-#define        bt_freelist     bt_u.u_freelist
-       vmem_addr_t bt_start;
-       vmem_size_t bt_size;
-       int bt_type;
-};
-
-#define        BT_TYPE_SPAN            1
-#define        BT_TYPE_SPAN_STATIC     2
-#define        BT_TYPE_FREE            3
-#define        BT_TYPE_BUSY            4
-#define        BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
-
-#define        BT_END(bt)      ((bt)->bt_start + (bt)->bt_size - 1)
-
-typedef struct vmem_btag bt_t;
-
-#if defined(_KERNEL)
-static kmutex_t vmem_list_lock;
-static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
-#endif /* defined(_KERNEL) */
-
-/* ---- misc */
-
 #define        VMEM_ALIGNUP(addr, align) \
        (-(-(addr) & -(align)))
 
@@ -241,36 +156,26 @@
 #else /* defined(_KERNEL) */
 
 #define        xmalloc(sz, flags) \
-    kmem_intr_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
-#define        xfree(p, sz)            kmem_intr_free(p, sz);
+    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
+#define        xfree(p, sz)            kmem_free(p, sz);
 
 /*
- * Memory for arenas initialized during bootstrap.
- * There is memory for STATIC_VMEM_COUNT bootstrap arenas.
- *
  * BT_RESERVE calculation:
  * we allocate memory for boundary tags with vmem, therefore we have
  * to keep a reserve of bts used to allocate memory for bts.
  * This reserve is 4 for each arena involved in allocating vmem's memory.
  * BT_MAXFREE: don't cache excessive counts of bts in arenas
  */
-#define STATIC_VMEM_COUNT 4
 #define STATIC_BT_COUNT 200
 #define BT_MINRESERVE 4
 #define BT_MAXFREE 64
-/* must be equal or greater then qcache multiplier for kmem_va_arena */
-#define STATIC_QC_POOL_COUNT 8
-
-static struct vmem static_vmems[STATIC_VMEM_COUNT];
-static int static_vmem_count = STATIC_VMEM_COUNT;
 
 static struct vmem_btag static_bts[STATIC_BT_COUNT];
 static int static_bt_count = STATIC_BT_COUNT;
 
-static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
-static int static_qc_pool_count = STATIC_QC_POOL_COUNT;
-
+static struct vmem kmem_va_meta_arena_store;
 vmem_t *kmem_va_meta_arena;
+static struct vmem kmem_meta_arena_store;
 vmem_t *kmem_meta_arena;
 
 static kmutex_t vmem_refill_lock;
@@ -652,30 +557,17 @@
                snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
                    vm->vm_name, size);
 
-               if (vm->vm_flags & VM_BOOTSTRAP) {
-                       KASSERT(static_qc_pool_count > 0);
-                       pc = &static_qc_pools[--static_qc_pool_count];
-                       pool_cache_bootstrap(pc, size,
-                           ORDER2SIZE(vm->vm_quantum_shift), 0,
-                           PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
-                           qc->qc_name, pa, ipl, NULL, NULL, NULL);
-               } else {
-                       pc = pool_cache_init(size,
-                           ORDER2SIZE(vm->vm_quantum_shift), 0,
-                           PR_NOALIGN | PR_NOTOUCH /* XXX */,
-                           qc->qc_name, pa, ipl, NULL, NULL, NULL);
-               }
+               pc = pool_cache_init(size,
+                   ORDER2SIZE(vm->vm_quantum_shift), 0,
+                   PR_NOALIGN | PR_NOTOUCH /* XXX */,
+                   qc->qc_name, pa, ipl, NULL, NULL, NULL);
+
                qc->qc_cache = pc;
                KASSERT(qc->qc_cache != NULL);  /* XXX */
                if (prevqc != NULL &&
                    qc->qc_cache->pc_pool.pr_itemsperpage ==
                    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
-                       if (vm->vm_flags & VM_BOOTSTRAP) {
-                               pool_cache_bootstrap_destroy(pc);
-                               //static_qc_pool_count++;
-                       } else {
-                               pool_cache_destroy(qc->qc_cache);
-                       }
+                       pool_cache_destroy(qc->qc_cache);
                        vm->vm_qcache[i - 1] = prevqc;
                        continue;
                }
@@ -700,18 +592,14 @@
                if (prevqc == qc) {
                        continue;
                }
-               if (vm->vm_flags & VM_BOOTSTRAP) {
-                       pool_cache_bootstrap_destroy(qc->qc_cache);
-               } else {
-                       pool_cache_destroy(qc->qc_cache);
-               }
+               pool_cache_destroy(qc->qc_cache);
                prevqc = qc;
        }
 }
 #endif
 
 #if defined(_KERNEL)
-void
+static void
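
The BT_RESERVE comment retained in the subr_vmem.c hunk above describes
the recursion the reserve exists to break: boundary tags are themselves
allocated through vmem-backed arenas, so each arena must hold back a few
cached tags for allocations made on behalf of the tag allocator itself.
A hypothetical illustration of that policy (not the commit's code; the
names below are invented for the example):

    #define BT_MINRESERVE   4       /* value as in the diff above */

    struct arena_tags {
            int     nfreetags;      /* boundary tags cached on the free list */
    };

    /*
     * Ordinary allocations may not take the last BT_MINRESERVE cached
     * tags, so an allocation performed while refilling the tag cache
     * can always find one.
     */
    static int
    bt_take_ok(const struct arena_tags *vm, int populating)
    {

            if (populating)
                    return vm->nfreetags > 0;
            return vm->nfreetags > BT_MINRESERVE;
    }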


