tech-kern archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

vmem boundary tags managed with pool



Hi,

I would like to change the way vmem(9) allocates boundary tags towards
using a pool(9) for that.

This de-duplicates code and makes it possible to release memory pages
used for boundary tags.

Early patch — any comments?

Lars

Index: sys/kern/subr_vmem.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_vmem.c,v
retrieving revision 1.86
diff -u -r1.86 subr_vmem.c
--- sys/kern/subr_vmem.c        25 Oct 2013 11:35:55 -0000      1.86
+++ sys/kern/subr_vmem.c        19 Nov 2013 06:38:25 -0000
@@ -75,14 +75,13 @@
 #include <sys/evcnt.h>
 #define VMEM_EVCNT_DEFINE(name) \
 struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
-    "vmemev", #name); \
+    "vmem", #name); \
 EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
 #define VMEM_EVCNT_INCR(ev)    vmem_evcnt_##ev.ev_count++
 #define VMEM_EVCNT_DECR(ev)    vmem_evcnt_##ev.ev_count--
 
-VMEM_EVCNT_DEFINE(bt_pages)
-VMEM_EVCNT_DEFINE(bt_count)
-VMEM_EVCNT_DEFINE(bt_inuse)
+VMEM_EVCNT_DEFINE(static_bt_count)
+VMEM_EVCNT_DEFINE(static_bt_inuse)
 
 #define        VMEM_CONDVAR_INIT(vm, wchan)    cv_init(&vm->vm_cv, wchan)
 #define        VMEM_CONDVAR_DESTROY(vm)        cv_destroy(&vm->vm_cv)
@@ -125,6 +124,7 @@
 
 #if defined(_KERNEL)
 static bool vmem_bootstrapped = false;
+static bool vmem_initialized = false;
 static kmutex_t vmem_list_lock;
 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
 #endif /* defined(_KERNEL) */
@@ -165,9 +165,9 @@
  * This reserve is 4 for each arena involved in allocating vmems memory.
  * BT_MAXFREE: don't cache excessive counts of bts in arenas
  */
-#define STATIC_BT_COUNT 200
+#define STATIC_BT_COUNT 400
 #define BT_MINRESERVE 4
-#define BT_MAXFREE 64
+#define BT_MAXFREE 16
 
 static struct vmem_btag static_bts[STATIC_BT_COUNT];
 static int static_bt_count = STATIC_BT_COUNT;
@@ -177,79 +177,54 @@
 static struct vmem kmem_meta_arena_store;
 vmem_t *kmem_meta_arena;
 
-static kmutex_t vmem_refill_lock;
+static kmutex_t vmem_btag_refill_lock;
 static kmutex_t vmem_btag_lock;
 static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
 static size_t vmem_btag_freelist_count = 0;
-static size_t vmem_btag_count = STATIC_BT_COUNT;
+static struct pool vmem_btag_pool;
 
 /* ---- boundary tag */
 
-#define        BT_PER_PAGE     (PAGE_SIZE / sizeof(bt_t))
-
 static int bt_refill(vmem_t *vm, vm_flag_t flags);
 
-static int
-bt_refillglobal(vm_flag_t flags)
+static void *
+pool_page_alloc_vmem_meta(struct pool *pp, int flags)
 {
+       const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
        vmem_addr_t va;
-       bt_t *btp;
-       bt_t *bt;
-       int i;
-
-       mutex_enter(&vmem_refill_lock);
-
-       mutex_enter(&vmem_btag_lock);
-       if (vmem_btag_freelist_count > 0) {
-               mutex_exit(&vmem_btag_lock);
-               mutex_exit(&vmem_refill_lock);
-               return 0;
-       }
-       mutex_exit(&vmem_btag_lock);
+       int ret;
 
-       if (vmem_alloc(kmem_meta_arena, PAGE_SIZE,
-           (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va) != 0) {
-               mutex_exit(&vmem_refill_lock);
-               return ENOMEM;
-       }
-       VMEM_EVCNT_INCR(bt_pages);
+       ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
+           (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);
 
-       mutex_enter(&vmem_btag_lock);
-       btp = (void *) va;
-       for (i = 0; i < (BT_PER_PAGE); i++) {
-               bt = btp;
-               memset(bt, 0, sizeof(*bt));
-               LIST_INSERT_HEAD(&vmem_btag_freelist, bt,
-                   bt_freelist);
-               vmem_btag_freelist_count++;
-               vmem_btag_count++;
-               VMEM_EVCNT_INCR(bt_count);
-               btp++;
-       }
-       mutex_exit(&vmem_btag_lock);
-
-       bt_refill(kmem_arena, (flags & ~VM_FITMASK)
-           | VM_INSTANTFIT | VM_POPULATING);
-       bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK)
-           | VM_INSTANTFIT | VM_POPULATING);
-       bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK)
-           | VM_INSTANTFIT | VM_POPULATING);
+       return ret ? NULL : (void *)va;
+}
 
-       mutex_exit(&vmem_refill_lock);
+static void
+pool_page_free_vmem_meta(struct pool *pp, void *v)
+{
 
-       return 0;
+       vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
 }
 
+/* allocator for vmem-pool metadata */
+struct pool_allocator pool_allocator_vmem_meta = {
+       .pa_alloc = pool_page_alloc_vmem_meta,
+       .pa_free = pool_page_free_vmem_meta,
+       .pa_pagesz = 0
+};
+
 static int
 bt_refill(vmem_t *vm, vm_flag_t flags)
 {
        bt_t *bt;
 
-       if (!(flags & VM_POPULATING)) {
-               bt_refillglobal(flags);
+       VMEM_LOCK(vm);
+       if (vm->vm_nfreetags > BT_MINRESERVE) {
+               VMEM_UNLOCK(vm);
+               return 0;
        }
 
-       VMEM_LOCK(vm);
        mutex_enter(&vmem_btag_lock);
        while (!LIST_EMPTY(&vmem_btag_freelist) &&
            vm->vm_nfreetags <= BT_MINRESERVE) {
@@ -258,15 +233,37 @@
                LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
                vm->vm_nfreetags++;
                vmem_btag_freelist_count--;
+               VMEM_EVCNT_INCR(static_bt_inuse);
        }
        mutex_exit(&vmem_btag_lock);
 
+       while (vm->vm_nfreetags <= BT_MINRESERVE) {
+               VMEM_UNLOCK(vm);
+               mutex_enter(&vmem_btag_refill_lock);
+               bt = pool_get(&vmem_btag_pool,
+                   (flags & VM_SLEEP) ? PR_WAITOK: PR_NOWAIT);
+               mutex_exit(&vmem_btag_refill_lock);
+               VMEM_LOCK(vm);
+               LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
+               vm->vm_nfreetags++;
+       }
+
        if (vm->vm_nfreetags == 0) {
                VMEM_UNLOCK(vm);
                return ENOMEM;
        }
+
        VMEM_UNLOCK(vm);
 
+       if (vmem_initialized) {
+               bt_refill(kmem_arena, (flags & ~VM_FITMASK)
+                   | VM_INSTANTFIT | VM_POPULATING);
+               bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK)
+                   | VM_INSTANTFIT | VM_POPULATING);
+               bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK)
+                   | VM_INSTANTFIT | VM_POPULATING);
+       }
+
        return 0;
 }
 
@@ -276,8 +273,7 @@
        bt_t *bt;
 again:
        VMEM_LOCK(vm);
-       if (vm->vm_nfreetags <= BT_MINRESERVE &&
-           (flags & VM_POPULATING) == 0) {
+       if (vm->vm_nfreetags <= BT_MINRESERVE && (flags & VM_POPULATING) == 0) {
                VMEM_UNLOCK(vm);
                if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
                        return NULL;
@@ -288,7 +284,6 @@
        LIST_REMOVE(bt, bt_freelist);
        vm->vm_nfreetags--;
        VMEM_UNLOCK(vm);
-       VMEM_EVCNT_INCR(bt_inuse);
 
        return bt;
 }
@@ -304,13 +299,22 @@
                bt = LIST_FIRST(&vm->vm_freetags);
                LIST_REMOVE(bt, bt_freelist);
                vm->vm_nfreetags--;
-               mutex_enter(&vmem_btag_lock);
-               LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
-               vmem_btag_freelist_count++;
-               mutex_exit(&vmem_btag_lock);
+               if (bt >= static_bts
+                   && bt < static_bts + sizeof(static_bts)) {
+                       mutex_enter(&vmem_btag_lock);
+                       LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
+                       vmem_btag_freelist_count++;
+                       mutex_exit(&vmem_btag_lock);
+                       VMEM_EVCNT_DECR(static_bt_inuse);
+               } else {
+                       VMEM_UNLOCK(vm);
+                       mutex_enter(&vmem_btag_refill_lock);
+                       pool_put(&vmem_btag_pool, bt);
+                       mutex_exit(&vmem_btag_refill_lock);
+                       VMEM_LOCK(vm);
+               }
        }
        VMEM_UNLOCK(vm);
-       VMEM_EVCNT_DECR(bt_inuse);
 }
 
 #endif /* defined(_KERNEL) */
@@ -603,13 +607,13 @@
 {
 
        mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
-       mutex_init(&vmem_refill_lock, MUTEX_DEFAULT, IPL_VM);
        mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
+       mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);
 
        while (static_bt_count-- > 0) {
                bt_t *bt = &static_bts[static_bt_count];
                LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
-               VMEM_EVCNT_INCR(bt_count);
+               VMEM_EVCNT_INCR(static_bt_count);
                vmem_btag_freelist_count++;
        }
        vmem_bootstrapped = TRUE;
@@ -628,6 +632,11 @@
            0, 0, PAGE_SIZE,
            uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
            0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
+
+       pool_init(&vmem_btag_pool, sizeof(bt_t), 0, 0, 0,
+                   "vmembt", &pool_allocator_vmem_meta, IPL_VM);
+       
+       vmem_initialized = TRUE;
 }
 #endif /* defined(_KERNEL) */
 
@@ -699,12 +708,20 @@
                bt_t *bt = LIST_FIRST(&vm->vm_freetags);
                LIST_REMOVE(bt, bt_freelist);
                vm->vm_nfreetags--;
-               mutex_enter(&vmem_btag_lock);
+               if (bt >= static_bts
+                   && bt < static_bts + sizeof(static_bts)) {
+                       mutex_enter(&vmem_btag_lock);
 #if defined (_KERNEL)
-               LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
-               vmem_btag_freelist_count++;
+                       LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
+                       vmem_btag_freelist_count++;
 #endif /* defined(_KERNEL) */
-               mutex_exit(&vmem_btag_lock);
+                       mutex_exit(&vmem_btag_lock);
+                       VMEM_EVCNT_DECR(static_bt_inuse);
+               } else {
+                       mutex_enter(&vmem_btag_refill_lock);
+                       pool_put(&vmem_btag_pool, bt);
+                       mutex_exit(&vmem_btag_refill_lock);
+               }
        }
 
        VMEM_CONDVAR_DESTROY(vm);


Home | Main Index | Thread Index | Old Index