Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src - De-opaque'ify struct threadpool_job.



details:   https://anonhg.NetBSD.org/src/rev/d3b7a0a48a07
branches:  trunk
changeset: 837904:d3b7a0a48a07
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Wed Dec 26 18:54:19 2018 +0000

description:
- De-opaque'ify struct threadpool_job.
- De-_t'ify all of the structure types.

No functional change, no ABI change (verified with old rump unit test
before and after new librump.so).

Per Taylor's request.

diffstat:

 share/man/man9/threadpool.9                        |   42 ++--
 sys/kern/kern_threadpool.c                         |  151 ++++++++------------
 sys/sys/threadpool.h                               |   54 ++++---
 tests/kernel/threadpool_tester/threadpool_tester.c |   30 ++--
 tests/rump/kernspace/threadpool.c                  |   22 +-
 5 files changed, 141 insertions(+), 158 deletions(-)

diffs (truncated from 840 to 300 lines):

diff -r a5519bb9bf4a -r d3b7a0a48a07 share/man/man9/threadpool.9
--- a/share/man/man9/threadpool.9       Wed Dec 26 18:31:29 2018 +0000
+++ b/share/man/man9/threadpool.9       Wed Dec 26 18:54:19 2018 +0000
@@ -1,4 +1,4 @@
-.\" $NetBSD: threadpool.9,v 1.1 2018/12/24 16:58:54 thorpej Exp $
+.\" $NetBSD: threadpool.9,v 1.2 2018/12/26 18:54:19 thorpej Exp $
 .\"
 .\" Copyright (c) 2014 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -39,40 +39,40 @@
 .In sys/threadpool.h
 .\""""""""""""""""""""""""""""""""""""
 .Ft int
-.Fn threadpool_get "threadpool_t **poolp" "pri_t pri"
+.Fn threadpool_get "struct threadpool **poolp" "pri_t pri"
 .\"
 .Ft void
-.Fn threadpool_put "threadpool_t *pool" "pri_t pri"
+.Fn threadpool_put "struct threadpool *pool" "pri_t pri"
 .\""""""""""""""""""""""""""""""""""""
 .Ft int
-.Fn threadpool_percpu_get "threadpool_percpu_t **pool_percpup" "pri_t pri"
+.Fn threadpool_percpu_get "struct threadpool_percpu **pool_percpup" "pri_t pri"
 .\"
 .Ft void
-.Fn threadpool_percpu_put "threadpool_percpu_t *pool_percpu" "pri_t pri"
+.Fn threadpool_percpu_put "struct threadpool_percpu *pool_percpu" "pri_t pri"
 .\"
-.Ft threadpool_t *
-.Fn threadpool_percpu_ref "threadpool_percpu_t *pool"
+.Ft struct threadpool *
+.Fn threadpool_percpu_ref "struct threadpool_percpu *pool"
 .\"
-.Ft threadpool_t *
-.Fn threadpool_percpu_ref_remote "threadpool_percpu_t *pool" "struct cpu_info *ci"
+.Ft struct threadpool *
+.Fn threadpool_percpu_ref_remote "struct threadpool_percpu *pool" "struct cpu_info *ci"
 .\""""""""""""""""""""""""""""""""""""
 .Ft void
-.Fn threadpool_job_init "threadpool_job_t *job" "void (*fn)(threadpool_job_t *)" "kmutex_t *interlock"
+.Fn threadpool_job_init "struct threadpool_job *job" "void (*fn)(struct threadpool_job *)" "kmutex_t *interlock"
 .\"
 .Ft void
-.Fn threadpool_job_destroy "threadpool_job_t *job"
+.Fn threadpool_job_destroy "struct threadpool_job *job"
 .\"
 .Ft void
-.Fn threadpool_job_done "threadpool_job_t *job"
+.Fn threadpool_job_done "struct threadpool_job *job"
 .\""""""""""""""""""""""""""""""""""""
 .Ft void
-.Fn threadpool_schedule_job "threadpool_t *pool" "threadpool_job_t *job"
+.Fn threadpool_schedule_job "struct threadpool *pool" "struct threadpool_job *job"
 .\"
 .Ft void
-.Fn threadpool_cancel_job "threadpool_t *pool" "threadpool_job_t *job"
+.Fn threadpool_cancel_job "struct threadpool *pool" "struct threadpool_job *job"
 .\"
 .Ft bool
-.Fn threadpool_cancel_job_async "threadpool_t *pool" "threadpool_job_t *job"
+.Fn threadpool_cancel_job_async "struct threadpool *pool" "struct threadpool_job *job"
 .\"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
 .Sh DESCRIPTION
 The
@@ -94,18 +94,18 @@
 .Fn threadpool_percpu_put .
 .Pp
 Job state is stored in the
-.Vt threadpool_job_t
-object.
+.Vt threadpool_job
+structure.
 Callers of the
 .Nm
 abstraction
 must allocate memory for
-.Vt threadpool_job_t
-objects, but should consider them opaque, and should not inspect or
+.Vt threadpool_job
+structures, but should consider them opaque, and should not inspect or
 copy them.
 Each job represented by a
-.Vt threadpool_job_t
-object will be run only once at a time, until the action associated
+.Vt threadpool_job
+structure will be run only once at a time, until the action associated
 with it calls
 .Fn threadpool_job_done .
 .Pp
diff -r a5519bb9bf4a -r d3b7a0a48a07 sys/kern/kern_threadpool.c
--- a/sys/kern/kern_threadpool.c        Wed Dec 26 18:31:29 2018 +0000
+++ b/sys/kern/kern_threadpool.c        Wed Dec 26 18:54:19 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_threadpool.c,v 1.3 2018/12/25 05:44:13 thorpej Exp $      */
+/*     $NetBSD: kern_threadpool.c,v 1.4 2018/12/26 18:54:19 thorpej Exp $      */
 
 /*-
  * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
@@ -76,12 +76,12 @@
  * touching remote CPUs' memory when scheduling a job, but that still
  * requires interprocessor synchronization.  Perhaps we could get by
  * with a single overseer thread, at the expense of another pointer in
- * struct threadpool_job_impl to identify the CPU on which it must run
+ * struct threadpool_job to identify the CPU on which it must run
  * in order for the overseer to schedule it correctly.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.3 2018/12/25 05:44:13 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.4 2018/12/26 18:54:19 thorpej Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -108,33 +108,16 @@
            RUN_ONCE(&threadpool_init_once, threadpools_init);  \
        KASSERT(threadpool_init_error == 0);                    \
 } while (/*CONSTCOND*/0)
-       
 
 /* Data structures */
 
-TAILQ_HEAD(job_head, threadpool_job_impl);
+TAILQ_HEAD(job_head, threadpool_job);
 TAILQ_HEAD(thread_head, threadpool_thread);
 
-typedef struct threadpool_job_impl {
-       kmutex_t                        *job_lock;              /* 1 */
-       struct threadpool_thread        *job_thread;            /* 1 */
-       TAILQ_ENTRY(threadpool_job_impl) job_entry;             /* 2 */
-       volatile unsigned int           job_refcnt;             /* 1 */
-                               /* implicit pad on _LP64 */
-       kcondvar_t                      job_cv;                 /* 3 */
-       threadpool_job_fn_t             job_fn;                 /* 1 */
-                                                           /* ILP32 / LP64 */
-       char                            job_name[MAXCOMLEN];    /* 4 / 2 */
-} threadpool_job_impl_t;
-
-CTASSERT(sizeof(threadpool_job_impl_t) <= sizeof(threadpool_job_t));
-#define        THREADPOOL_JOB_TO_IMPL(j)       ((threadpool_job_impl_t *)(j))
-#define        THREADPOOL_IMPL_TO_JOB(j)       ((threadpool_job_t *)(j))
-
 struct threadpool_thread {
        struct lwp                      *tpt_lwp;
-       threadpool_t                    *tpt_pool;
-       threadpool_job_impl_t           *tpt_job;
+       struct threadpool               *tpt_pool;
+       struct threadpool_job           *tpt_job;
        kcondvar_t                      tpt_cv;
        TAILQ_ENTRY(threadpool_thread)  tpt_entry;
 };
@@ -151,16 +134,16 @@
        pri_t                           tp_pri;
 };
 
-static int     threadpool_hold(threadpool_t *);
-static void    threadpool_rele(threadpool_t *);
+static int     threadpool_hold(struct threadpool *);
+static void    threadpool_rele(struct threadpool *);
 
-static int     threadpool_percpu_create(threadpool_percpu_t **, pri_t);
-static void    threadpool_percpu_destroy(threadpool_percpu_t *);
+static int     threadpool_percpu_create(struct threadpool_percpu **, pri_t);
+static void    threadpool_percpu_destroy(struct threadpool_percpu *);
 
-static void    threadpool_job_dead(threadpool_job_t *);
+static void    threadpool_job_dead(struct threadpool_job *);
 
-static int     threadpool_job_hold(threadpool_job_impl_t *);
-static void    threadpool_job_rele(threadpool_job_impl_t *);
+static int     threadpool_job_hold(struct threadpool_job *);
+static void    threadpool_job_rele(struct threadpool_job *);
 
 static void    threadpool_overseer_thread(void *) __dead;
 static void    threadpool_thread(void *) __dead;
@@ -220,10 +203,10 @@
 
 static LIST_HEAD(, threadpool_percpu) percpu_threadpools;
 
-static threadpool_percpu_t *
+static struct threadpool_percpu *
 threadpool_lookup_percpu(pri_t pri)
 {
-       threadpool_percpu_t *tpp;
+       struct threadpool_percpu *tpp;
 
        LIST_FOREACH(tpp, &percpu_threadpools, tpp_link) {
                if (tpp->tpp_pri == pri)
@@ -233,14 +216,14 @@
 }
 
 static void
-threadpool_insert_percpu(threadpool_percpu_t *tpp)
+threadpool_insert_percpu(struct threadpool_percpu *tpp)
 {
        KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == NULL);
        LIST_INSERT_HEAD(&percpu_threadpools, tpp, tpp_link);
 }
 
 static void
-threadpool_remove_percpu(threadpool_percpu_t *tpp)
+threadpool_remove_percpu(struct threadpool_percpu *tpp)
 {
        KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == tpp);
        LIST_REMOVE(tpp, tpp_link);
@@ -265,7 +248,7 @@
        mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE);
 
        TP_LOG(("%s: sizeof(threadpool_job) = %zu\n",
-           __func__, sizeof(threadpool_job_t)));
+           __func__, sizeof(struct threadpool_job)));
 
        return 0;
 }
@@ -279,10 +262,10 @@
 }
 
 static int
-threadpool_create(threadpool_t **poolp, struct cpu_info *ci, pri_t pri,
+threadpool_create(struct threadpool **poolp, struct cpu_info *ci, pri_t pri,
     size_t size)
 {
-       threadpool_t *const pool = kmem_zalloc(size, KM_SLEEP);
+       struct threadpool *const pool = kmem_zalloc(size, KM_SLEEP);
        struct lwp *lwp;
        int ktflags;
        int error;
@@ -340,7 +323,7 @@
 /* Thread pool destruction */
 
 static void
-threadpool_destroy(threadpool_t *pool, size_t size)
+threadpool_destroy(struct threadpool *pool, size_t size)
 {
        struct threadpool_thread *thread;
 
@@ -371,7 +354,7 @@
 }
 
 static int
-threadpool_hold(threadpool_t *pool)
+threadpool_hold(struct threadpool *pool)
 {
        unsigned int refcnt;
 
@@ -386,7 +369,7 @@
 }
 
 static void
-threadpool_rele(threadpool_t *pool)
+threadpool_rele(struct threadpool *pool)
 {
        unsigned int refcnt;
 
@@ -409,7 +392,7 @@
 /* Unbound thread pools */
 
 int
-threadpool_get(threadpool_t **poolp, pri_t pri)
+threadpool_get(struct threadpool **poolp, pri_t pri)
 {
        struct threadpool_unbound *tpu, *tmp = NULL;
        int error;
@@ -424,7 +407,7 @@
        mutex_enter(&threadpools_lock);
        tpu = threadpool_lookup_unbound(pri);
        if (tpu == NULL) {
-               threadpool_t *new_pool;
+               struct threadpool *new_pool;
                mutex_exit(&threadpools_lock);
                TP_LOG(("%s: No pool for pri=%d, creating one.\n",
                        __func__, (int)pri));
@@ -455,14 +438,14 @@
        mutex_exit(&threadpools_lock);
 
        if (tmp != NULL)
-               threadpool_destroy((threadpool_t *)tmp, sizeof(*tpu));
+               threadpool_destroy((struct threadpool *)tmp, sizeof(*tpu));
        KASSERT(tpu != NULL);
        *poolp = &tpu->tpu_pool;
        return 0;
 }
 
 void
-threadpool_put(threadpool_t *pool, pri_t pri)
+threadpool_put(struct threadpool *pool, pri_t pri)
 {
        struct threadpool_unbound *tpu =
            container_of(pool, struct threadpool_unbound, tpu_pool);
@@ -491,9 +474,9 @@
 /* Per-CPU thread pools */
 
 int
-threadpool_percpu_get(threadpool_percpu_t **pool_percpup, pri_t pri)
+threadpool_percpu_get(struct threadpool_percpu **pool_percpup, pri_t pri)
 {
-       threadpool_percpu_t *pool_percpu, *tmp = NULL;
+       struct threadpool_percpu *pool_percpu, *tmp = NULL;
        int error;
 
        THREADPOOL_INIT();



Home | Main Index | Thread Index | Old Index