Source-Changes-HG archive


[src/trunk]: src/sys/sys Do the last change differently:



details:   https://anonhg.NetBSD.org/src/rev/beaa9a2f7ca8
branches:  trunk
changeset: 1029133:beaa9a2f7ca8
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Wed Dec 22 16:57:28 2021 +0000

description:
Do the last change differently:

Instead of having a pre-destruct hook, put knowledge of passive
serialization into the pool allocator directly, enabled by PR_PSERIALIZE
when the pool / pool_cache is initialized.  This guarantees that a
passive serialization barrier is performed before the object's
destructor is called, or before the page containing the object is freed
back to the system (in the case of no destructor).  Note that the internal
allocator overhead is different when PR_PSERIALIZE is used (it implies
PR_NOTOUCH, because the objects must remain in a valid state).
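
For illustration only, a minimal consumer-side sketch of the new scheme
(pool_cache_init() and PR_PSERIALIZE are what this change provides;
struct foo, foo_ctor(), foo_dtor() and the "foopl" name are hypothetical
placeholders, modeled on the kern_lwp.c hunk below):

    #include <sys/param.h>
    #include <sys/intr.h>
    #include <sys/pool.h>

    struct foo {
            int     f_refs;                 /* example field */
    };

    static int      foo_ctor(void *, void *, int);
    static void     foo_dtor(void *, void *);
    static pool_cache_t foo_cache;

    void
    foo_subsystem_init(void)
    {
            /*
             * PR_PSERIALIZE: the pool layer performs a passive
             * serialization barrier before foo_dtor() is called, or
             * before the backing page is freed when there is no dtor.
             */
            foo_cache = pool_cache_init(sizeof(struct foo), 0, 0,
                PR_PSERIALIZE, "foopl", NULL, IPL_NONE,
                foo_ctor, foo_dtor, NULL);
    }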

In the DRM Linux API shim, this allows us to remove the custom page
allocator for SLAB_TYPESAFE_BY_RCU.
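
As a minimal sketch of the shim side after this change (kmem_cache_create()
and SLAB_TYPESAFE_BY_RCU appear in the diff below; struct bar and bar_ctor()
are hypothetical), an RCU-type-safe cache is now created like any other,
with the grace period handled by PR_PSERIALIZE inside the pool layer rather
than by a custom page allocator:

    #include <linux/slab.h>

    struct bar {
            int     b_state;                /* example field */
    };

    static void     bar_ctor(void *);
    static struct kmem_cache *bar_cache;

    void
    bar_subsystem_init(void)
    {
            /*
             * SLAB_TYPESAFE_BY_RCU now simply sets PR_PSERIALIZE on the
             * backing pool_cache; the former pool_allocator_kmem_rcu
             * page allocator is no longer needed.
             */
            bar_cache = kmem_cache_create("bar", sizeof(struct bar), 0,
                SLAB_TYPESAFE_BY_RCU, bar_ctor);
    }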

diffstat:

 sys/external/bsd/common/include/linux/slab.h |  46 +++--------------
 sys/kern/kern_lwp.c                          |  33 ++++--------
 sys/kern/subr_pool.c                         |  73 ++++++++++++++++++---------
 sys/sys/pool.h                               |   8 +-
 4 files changed, 71 insertions(+), 89 deletions(-)

diffs (truncated from 386 to 300 lines):

diff -r afcfdea60b0e -r beaa9a2f7ca8 sys/external/bsd/common/include/linux/slab.h
--- a/sys/external/bsd/common/include/linux/slab.h      Wed Dec 22 15:47:42 2021 +0000
+++ b/sys/external/bsd/common/include/linux/slab.h      Wed Dec 22 16:57:28 2021 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: slab.h,v 1.11 2021/12/21 19:07:09 thorpej Exp $        */
+/*     $NetBSD: slab.h,v 1.12 2021/12/22 16:57:29 thorpej Exp $        */
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -174,24 +174,6 @@
        void            (*kc_dtor)(void *);
 };
 
-/* XXX These should be in <sys/pool.h>.  */
-void * pool_page_alloc(struct pool *, int);
-void   pool_page_free(struct pool *, void *);
-
-static void
-pool_page_free_rcu(struct pool *pp, void *v)
-{
-
-       synchronize_rcu();
-       pool_page_free(pp, v);
-}
-
-static struct pool_allocator pool_allocator_kmem_rcu = {
-       .pa_alloc = pool_page_alloc,
-       .pa_free = pool_page_free_rcu,
-       .pa_pagesz = 0,
-};
-
 static int
 kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
 {
@@ -212,26 +194,20 @@
                (*kc->kc_dtor)(ptr);
 }
 
-static void
-kmem_cache_pre_dtor(void *cookie)
-{
-       synchronize_rcu();
-}
-
 static inline struct kmem_cache *
 kmem_cache_create(const char *name, size_t size, size_t align,
     unsigned long flags, void (*ctor)(void *))
 {
-       struct pool_allocator *palloc = NULL;
        struct kmem_cache *kc;
+       int pcflags = 0;
 
        if (ISSET(flags, SLAB_HWCACHE_ALIGN))
                align = roundup(MAX(1, align), CACHE_LINE_SIZE);
        if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
-               palloc = &pool_allocator_kmem_rcu;
+               pcflags |= PR_PSERIALIZE;
 
        kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
-       kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
+       kc->kc_pool_cache = pool_cache_init(size, align, 0, pcflags, name, NULL,
            IPL_VM, &kmem_cache_ctor, NULL, kc);
        kc->kc_size = size;
        kc->kc_ctor = ctor;
@@ -244,26 +220,20 @@
 kmem_cache_create_dtor(const char *name, size_t size, size_t align,
     unsigned long flags, void (*ctor)(void *), void (*dtor)(void *))
 {
-       struct pool_allocator *palloc = NULL;
        struct kmem_cache *kc;
+       int pcflags = 0;
 
        if (ISSET(flags, SLAB_HWCACHE_ALIGN))
                align = roundup(MAX(1, align), CACHE_LINE_SIZE);
-       /*
-        * No need to use pool_allocator_kmem_rcu here; RCU synchronization
-        * will be handled by the pre-destructor hook.
-        */
+       if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
+               pcflags |= PR_PSERIALIZE;
 
        kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
-       kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
+       kc->kc_pool_cache = pool_cache_init(size, align, 0, pcflags, name, NULL,
            IPL_VM, &kmem_cache_ctor, &kmem_cache_dtor, kc);
        kc->kc_size = size;
        kc->kc_ctor = ctor;
        kc->kc_dtor = dtor;
-       if (ISSET(flags, SLAB_TYPESAFE_BY_RCU)) {
-               pool_cache_setpredestruct(kc->kc_pool_cache,
-                   kmem_cache_pre_dtor);
-       }
 
        return kc;
 }
diff -r afcfdea60b0e -r beaa9a2f7ca8 sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c       Wed Dec 22 15:47:42 2021 +0000
+++ b/sys/kern/kern_lwp.c       Wed Dec 22 16:57:28 2021 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lwp.c,v 1.245 2021/12/21 19:00:37 thorpej Exp $   */
+/*     $NetBSD: kern_lwp.c,v 1.246 2021/12/22 16:57:28 thorpej Exp $   */
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
@@ -217,7 +217,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.245 2021/12/21 19:00:37 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.246 2021/12/22 16:57:28 thorpej Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -262,7 +262,6 @@
 
 static int             lwp_ctor(void *, void *, int);
 static void            lwp_dtor(void *, void *);
-static void            lwp_pre_dtor(void *);
 
 /* DTrace proc provider probes */
 SDT_PROVIDER_DEFINE(proc);
@@ -340,9 +339,16 @@
 
        LIST_INIT(&alllwp);
        lwpinit_specificdata();
-       lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
-           "lwppl", NULL, IPL_NONE, lwp_ctor, lwp_dtor, NULL);
-       pool_cache_setpredestruct(lwp_cache, lwp_pre_dtor);
+       /*
+        * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
+        * calls will exit before memory of LWPs is returned to the pool, where
+        * KVA of LWP structure might be freed and re-used for other purposes.
+        * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
+        * callers, therefore a regular passive serialization barrier will
+        * do the job.
+        */
+       lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0,
+           PR_PSERIALIZE, "lwppl", NULL, IPL_NONE, lwp_ctor, lwp_dtor, NULL);
 
        maxlwp = cpu_maxlwp();
        sysctl_kern_lwp_setup();
@@ -393,21 +399,6 @@
 }
 
 static void
-lwp_pre_dtor(void *arg __unused)
-{
-       /*
-        * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
-        * calls will exit before memory of LWPs is returned to the pool, where
-        * KVA of LWP structure might be freed and re-used for other purposes.
-        * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
-        * callers, therefore cross-call to all CPUs will do the job.
-        *
-        * XXX should use epoch based reclamation.
-        */
-       xc_barrier(0);
-}
-
-static void
 lwp_dtor(void *arg, void *obj)
 {
        lwp_t *l = obj;
diff -r afcfdea60b0e -r beaa9a2f7ca8 sys/kern/subr_pool.c
--- a/sys/kern/subr_pool.c      Wed Dec 22 15:47:42 2021 +0000
+++ b/sys/kern/subr_pool.c      Wed Dec 22 16:57:28 2021 +0000
@@ -1,8 +1,8 @@
-/*     $NetBSD: subr_pool.c,v 1.278 2021/12/21 18:59:22 thorpej Exp $  */
+/*     $NetBSD: subr_pool.c,v 1.279 2021/12/22 16:57:28 thorpej Exp $  */
 
 /*
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018,
- *     2020 The NetBSD Foundation, Inc.
+ *     2020, 2021 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.278 2021/12/21 18:59:22 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.279 2021/12/22 16:57:28 thorpej Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -142,9 +142,14 @@
 #define NO_CTOR        __FPTRCAST(int (*)(void *, void *, int), nullop)
 #define NO_DTOR        __FPTRCAST(void (*)(void *, void *), nullop)
 
+#define pc_has_pser(pc) (((pc)->pc_roflags & PR_PSERIALIZE) != 0)
 #define pc_has_ctor(pc) ((pc)->pc_ctor != NO_CTOR)
 #define pc_has_dtor(pc) ((pc)->pc_dtor != NO_DTOR)
 
+#define pp_has_pser(pp) (((pp)->pr_roflags & PR_PSERIALIZE) != 0)
+
+#define pool_barrier() xc_barrier(0)
+
 /*
  * Pool backend allocators.
  *
@@ -479,6 +484,8 @@
 {
        struct pool_item *pi = obj;
 
+       KASSERT(!pp_has_pser(pp));
+
 #ifdef POOL_CHECK_MAGIC
        pi->pi_magic = PI_MAGIC;
 #endif
@@ -841,6 +848,14 @@
        if (!cold)
                mutex_exit(&pool_allocator_lock);
 
+       /*
+        * PR_PSERIALIZE implies PR_NOTOUCH; freed objects must remain
+        * valid until the backing page is returned to the system.
+        */
+       if (flags & PR_PSERIALIZE) {
+               flags |= PR_NOTOUCH;
+       }
+
        if (align == 0)
                align = ALIGN(1);
 
@@ -2095,6 +2110,7 @@
        pool_cache_t pc1;
        struct cpu_info *ci;
        struct pool *pp;
+       unsigned int ppflags = flags;
 
        pp = &pc->pc_pool;
        if (palloc == NULL && ipl == IPL_NONE) {
@@ -2106,22 +2122,29 @@
                } else
                        palloc = &pool_allocator_nointr;
        }
-       pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
 
        if (ctor == NULL) {
                ctor = NO_CTOR;
        }
        if (dtor == NULL) {
                dtor = NO_DTOR;
+       } else {
+               /*
+                * If we have a destructor, then the pool layer does not
+                * need to worry about PR_PSERIALIZE.
+                */
+               ppflags &= ~PR_PSERIALIZE;
        }
 
+       pool_init(pp, size, align, align_offset, ppflags, wchan, palloc, ipl);
+
        pc->pc_fullgroups = NULL;
        pc->pc_partgroups = NULL;
        pc->pc_ctor = ctor;
        pc->pc_dtor = dtor;
-       pc->pc_pre_dtor = NULL;
        pc->pc_arg  = arg;
        pc->pc_refcnt = 0;
+       pc->pc_roflags = flags;
        pc->pc_freecheck = NULL;
 
        if ((flags & PR_LARGECACHE) != 0) {
@@ -2165,19 +2188,6 @@
 }
 
 /*
- * pool_cache_setpredestruct:
- *
- *     Set a pre-destructor hook for the specified pool cache.
- */
-void
-pool_cache_setpredestruct(pool_cache_t pc, void (*fn)(void *))
-{
-       KASSERT(pc->pc_pre_dtor == NULL);
-       pc->pc_pre_dtor = fn;
-       membar_sync();
-}
-
-/*
  * pool_cache_destroy:
  *
  *     Destroy a pool cache.
@@ -2308,13 +2318,11 @@
 pool_cache_pre_destruct(pool_cache_t pc)
 {
        /*
-        * Call the pre-destruct hook before destructing a batch
-        * of objects.  Users of this hook can perform passive
-        * serialization other other activities that need to be
-        * performed once-per-batch (rather than once-per-object).
+        * Perform a passive serialization barrier before destructing
+        * a batch of one or more objects.
         */
-       if (__predict_false(pc->pc_pre_dtor != NULL)) {
-               (*pc->pc_pre_dtor)(pc->pc_arg);
+       if (__predict_false(pc_has_pser(pc))) {
+               pool_barrier();


