Source-Changes-HG archive


[src/trunk]: src/sys/external/bsd/common/include/linux KMEM_CACHE, SLAB_TYPES...



details:   https://anonhg.NetBSD.org/src/rev/7a61862de676
branches:  trunk
changeset: 1027995:7a61862de676
user:      riastradh <riastradh%NetBSD.org@localhost>
date:      Sun Dec 19 01:33:44 2021 +0000

description:
KMEM_CACHE, SLAB_TYPESAFE_BY_RCU

diffstat:

 sys/external/bsd/common/include/linux/slab.h |  34 +++++++++++++++++++++++++--
 1 files changed, 31 insertions(+), 3 deletions(-)

diffs (82 lines):

diff -r d48fa57caec3 -r 7a61862de676 sys/external/bsd/common/include/linux/slab.h
--- a/sys/external/bsd/common/include/linux/slab.h      Sun Dec 19 01:33:17 2021 +0000
+++ b/sys/external/bsd/common/include/linux/slab.h      Sun Dec 19 01:33:44 2021 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: slab.h,v 1.2 2021/12/19 01:24:49 riastradh Exp $       */
+/*     $NetBSD: slab.h,v 1.3 2021/12/19 01:33:44 riastradh Exp $       */
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -40,6 +40,7 @@
 #include <uvm/uvm_extern.h>    /* For PAGE_SIZE.  */
 
 #include <linux/gfp.h>
+#include <linux/rcupdate.h>
 
 /* XXX Should use kmem, but Linux kfree doesn't take the size.  */
 
@@ -126,7 +127,9 @@
                free(ptr, M_TEMP);
 }
 
-#define        SLAB_HWCACHE_ALIGN      1
+#define        SLAB_HWCACHE_ALIGN      __BIT(0)
+#define        SLAB_RECLAIM_ACCOUNT    __BIT(1)
+#define        SLAB_TYPESAFE_BY_RCU    __BIT(2)
 
 struct kmem_cache {
        pool_cache_t    kc_pool_cache;
@@ -134,6 +137,24 @@
        void            (*kc_ctor)(void *);
 };
 
+/* XXX These should be in <sys/pool.h>.  */
+void * pool_page_alloc(struct pool *, int);
+void   pool_page_free(struct pool *, void *);
+
+static void
+pool_page_free_rcu(struct pool *pp, void *v)
+{
+
+       synchronize_rcu();
+       pool_page_free(pp, v);
+}
+
+static struct pool_allocator pool_allocator_kmem_rcu = {
+       .pa_alloc = pool_page_alloc,
+       .pa_free = pool_page_free_rcu,
+       .pa_pagesz = 0,
+};
+
 static int
 kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
 {
@@ -149,13 +170,16 @@
 kmem_cache_create(const char *name, size_t size, size_t align,
     unsigned long flags, void (*ctor)(void *))
 {
+       struct pool_allocator *palloc = NULL;
        struct kmem_cache *kc;
 
        if (ISSET(flags, SLAB_HWCACHE_ALIGN))
                align = roundup(MAX(1, align), CACHE_LINE_SIZE);
+       if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
+               palloc = &pool_allocator_kmem_rcu;
 
        kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
-       kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, NULL,
+       kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
            IPL_NONE, &kmem_cache_ctor, NULL, kc);
        kc->kc_size = size;
        kc->kc_ctor = ctor;
@@ -163,6 +187,10 @@
        return kc;
 }
 
+#define        KMEM_CACHE(T, F)                                                      \
+       kmem_cache_create(#T, sizeof(struct T), __alignof__(struct T),        \
+           (F), NULL)
+
 static inline void
 kmem_cache_destroy(struct kmem_cache *kc)
 {

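Usage sketch (illustrative, not part of this change): KMEM_CACHE is the usual Linux convenience macro that names a cache after a struct and derives its size and alignment, and SLAB_TYPESAFE_BY_RCU asks the cache to hold back its backing pages for an RCU grace period before they are reused for anything else, which this change implements with a pool allocator whose page-free routine calls synchronize_rcu() first. The fragment below assumes the compat layer also provides Linux-style kmem_cache_alloc()/kmem_cache_free(); the struct and function names are hypothetical.

/* Hypothetical consumer of the new macro and flag. */
#include <linux/slab.h>

struct obj {
	int	key;
};

static struct kmem_cache *obj_cache;

static void
obj_setup(void)
{
	/*
	 * Expands to kmem_cache_create("obj", sizeof(struct obj),
	 * __alignof__(struct obj), SLAB_TYPESAFE_BY_RCU, NULL).
	 * With SLAB_TYPESAFE_BY_RCU, pages backing the cache are only
	 * returned to the system after an RCU grace period, so an RCU
	 * reader still holding a pointer to a freed object keeps
	 * dereferencing memory of the same type.
	 */
	obj_cache = KMEM_CACHE(obj, SLAB_TYPESAFE_BY_RCU);
}

static void
obj_use(void)
{
	struct obj *o;

	/* Assumed Linux-style allocation/free entry points. */
	o = kmem_cache_alloc(obj_cache, GFP_KERNEL);
	if (o == NULL)
		return;
	o->key = 42;
	kmem_cache_free(obj_cache, o);
}

static void
obj_teardown(void)
{
	kmem_cache_destroy(obj_cache);
}

Note that SLAB_TYPESAFE_BY_RCU only keeps the memory type-stable within a grace period; a freed object may be reallocated and reinitialized immediately, so RCU readers must still revalidate object identity (e.g. a key or reference count) after dereferencing.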

