Re: tmpfs module and pool allocator
I've "fixed it" by explicitly calling mutex_destroy() in
tmpfs_modcmd():MODULE_CMD_FINI, but I think the right fix would be to
set a flag indicating that the pool allocator used is not a "system
allocator" (or that it is a "custom allocator", or whatever) and
should be destroyed as well, or at least its mutex...
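For reference, the stopgap looks more or less like this. It's only a
sketch, not the committed tmpfs code: I'm assuming the module's shared
allocator is the tmpfs_pool_allocator from tmpfs_pool.c and that the
module glue follows the usual vfs_attach()/vfs_detach() pattern.

	#include <sys/module.h>
	#include <sys/mount.h>
	#include <sys/mutex.h>
	#include <sys/pool.h>

	extern struct pool_allocator tmpfs_pool_allocator;	/* assumed name */
	extern struct vfsops tmpfs_vfsops;

	static int
	tmpfs_modcmd(modcmd_t cmd, void *arg)
	{
		int error;

		switch (cmd) {
		case MODULE_CMD_INIT:
			error = vfs_attach(&tmpfs_vfsops);
			break;
		case MODULE_CMD_FINI:
			error = vfs_detach(&tmpfs_vfsops);
			if (error == 0) {
				/*
				 * pool_destroy() never destroys the
				 * allocator's pa_lock, so kill it by hand
				 * or LOCKDEBUG complains when the module
				 * is loaded again.
				 */
				mutex_destroy(&tmpfs_pool_allocator.pa_lock);
			}
			break;
		default:
			error = ENOTTY;
			break;
		}
		return error;
	}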
Matt@ suggested using a reference count on the pool allocator instead
of the PA_INITIALIZED flag, so that when it drops to zero,
pool_destroy() can destroy the mutex as well.
I've attached a diff that supposedly implements this... but I don't
know if it's correct. It fixes the problem for me on a GENERIC kernel
built with DIAGNOSTIC and LOCKDEBUG.
Please have a look and let me know if it's okay to commit.
Thanks,
-e.
Index: sys/pool.h
===================================================================
RCS file: /cvsroot/src/sys/sys/pool.h,v
retrieving revision 1.67
diff -u -p -r1.67 pool.h
--- sys/pool.h 15 Oct 2009 20:50:12 -0000 1.67
+++ sys/pool.h 30 Dec 2009 17:58:17 -0000
@@ -64,7 +64,7 @@ struct pool_allocator {
 	kmutex_t	pa_lock;
 	TAILQ_HEAD(, pool) pa_list;	/* list of pools using this allocator */
 	int		pa_flags;
-#define	PA_INITIALIZED	0x01
+	uint32_t	pa_refcnt;	/* number of pools using this allocator */
 	int		pa_pagemask;
 	int		pa_pageshift;
 	struct vm_map *pa_backingmap;
Index: kern/subr_pool.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_pool.c,v
retrieving revision 1.177
diff -u -p -r1.177 subr_pool.c
--- kern/subr_pool.c 20 Oct 2009 17:24:22 -0000 1.177
+++ kern/subr_pool.c 30 Dec 2009 17:58:18 -0000
@@ -104,6 +104,9 @@ static struct pool *drainpp;
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;
 
+/* This lock protects initialization of a potentially shared pool allocator */
+static kmutex_t pool_allocator_lock;
+
 typedef uint32_t pool_item_bitmap_t;
 #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define	BITMAP_MASK	(BITMAP_SIZE - 1)
@@ -604,6 +607,8 @@ pool_subsystem_init(void)
 
 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
 	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
+
+	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
 }
 
 /*
@@ -650,7 +655,8 @@ pool_init(struct pool *pp, size_t size,
 		palloc = &pool_allocator_nointr_fullpage;
 	}
 #endif /* POOL_SUBPAGE */
-	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+	mutex_enter(&pool_allocator_lock);
+	if (palloc->pa_refcnt++ == 0) {
 		if (palloc->pa_pagesz == 0)
 			palloc->pa_pagesz = PAGE_SIZE;
 
@@ -663,8 +669,8 @@ pool_init(struct pool *pp, size_t size,
 		if (palloc->pa_backingmapptr != NULL) {
 			pa_reclaim_register(palloc);
 		}
-		palloc->pa_flags |= PA_INITIALIZED;
 	}
+	mutex_exit(&pool_allocator_lock);
 
 	if (align == 0)
 		align = ALIGN(1);
@@ -892,6 +898,11 @@ pool_destroy(struct pool *pp)
 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
 	mutex_exit(&pp->pr_alloc->pa_lock);
 
+	mutex_enter(&pool_allocator_lock);
+	if (--pp->pr_alloc->pa_refcnt == 0)
+		mutex_destroy(&pp->pr_alloc->pa_lock);
+	mutex_exit(&pool_allocator_lock);
+
 	mutex_enter(&pp->pr_lock);
 	KASSERT(pp->pr_cache == NULL);
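To illustrate what the refcount buys us, here's the lifecycle a
module-private allocator would have with the patch applied. The pool
and allocator names below are made up for the example:

	#include <sys/pool.h>

	struct example_item {
		int	ei_value;
	};

	/* pa_alloc/pa_free are assumed to be set up elsewhere. */
	static struct pool_allocator example_allocator;
	static struct pool example_pool;

	static void
	example_modinit(void)
	{
		/*
		 * First pool_init() against the allocator: pa_refcnt
		 * goes 0 -> 1 and pa_lock is initialized, all under
		 * pool_allocator_lock.
		 */
		pool_init(&example_pool, sizeof(struct example_item), 0, 0, 0,
		    "expool", &example_allocator, IPL_NONE);
	}

	static void
	example_modfini(void)
	{
		/*
		 * Last pool_destroy(): pa_refcnt goes 1 -> 0, so
		 * pool_destroy() now also mutex_destroy()s pa_lock and
		 * the module needs no explicit cleanup of its own.
		 */
		pool_destroy(&example_pool);
	}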