Source-Changes-HG archive


[src/trunk]: src/sys This is a first-cut implementation of support for cachin...



details:   https://anonhg.NetBSD.org/src/rev/147928e628c2
branches:  trunk
changeset: 500165:147928e628c2
user:      thorpej <thorpej@NetBSD.org>
date:      Thu Dec 07 05:45:57 2000 +0000

description:
This is a first-cut implementation of support for caching of
constructed objects in the pool allocator, similar to caching
of constructed objects in the Solaris SLAB allocator.

This implementation is a separate API (pool_cache_*()) layered
on top of pools to keep the caching complexity out of the way
of pools that won't benefit from it.

While we're here, allow pool items to be as large as the pool
page size.
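
As a usage sketch (not part of this commit): a subsystem layers a
cache on top of its existing pool by supplying a constructor/destructor
pair, then allocates through the cache instead of the pool.  The "foo"
names are hypothetical, and pool_cache_put()/PR_WAITOK do not appear in
the truncated diff below; they are assumed from the shape of the rest
of the pool_cache_*() API.

#include <sys/pool.h>

struct foo {
	int	f_state;
};

struct pool foo_pool;
struct pool_cache foo_cache;

int
foo_ctor(void *arg, void *object, int flags)
{
	struct foo *f = object;

	/* Expensive one-time setup, run only on a cache miss. */
	f->f_state = 0;
	return (0);
}

void
foo_dtor(void *arg, void *object)
{

	/* Undo foo_ctor(); called only when an object leaves the cache. */
}

void
foo_subsystem_init(void)
{

	/* Same 10-argument pool_init() used for phpool/pcgpool below. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopool",
	    0, 0, 0, 0);
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}

struct foo *
foo_alloc(void)
{

	/* Returns an already-constructed object when a group has one. */
	return (pool_cache_get(&foo_cache, PR_WAITOK));
}

void
foo_free(struct foo *f)
{

	/* Object goes back to a cache group still in constructed form. */
	pool_cache_put(&foo_cache, f);
}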

diffstat:

 sys/kern/subr_pool.c |  340 ++++++++++++++++++++++++++++++++++++++++++++++++--
 sys/sys/pool.h       |   35 ++++-
 2 files changed, 358 insertions(+), 17 deletions(-)

diffs (truncated from 513 to 300 lines):

diff -r f77ff54fd0a1 -r 147928e628c2 sys/kern/subr_pool.c
--- a/sys/kern/subr_pool.c      Thu Dec 07 03:55:44 2000 +0000
+++ b/sys/kern/subr_pool.c      Thu Dec 07 05:45:57 2000 +0000
@@ -1,7 +1,7 @@
-/*     $NetBSD: subr_pool.c,v 1.42 2000/12/06 18:20:52 thorpej Exp $   */
+/*     $NetBSD: subr_pool.c,v 1.43 2000/12/07 05:45:57 thorpej Exp $   */
 
 /*-
- * Copyright (c) 1997, 1999 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -100,11 +100,49 @@
        TAILQ_ENTRY(pool_item)  pi_list;
 };
 
-
 #define        PR_HASH_INDEX(pp,addr) \
        (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
 
+/*
+ * Pool cache management.
+ *
+ * Pool caches provide a way for constructed objects to be cached by the
+ * pool subsystem.  This can lead to performance improvements by avoiding
+ * needless object construction/destruction; it is deferred until absolutely
+ * necessary.
+ *
+ * Caches are grouped into cache groups.  Each cache group references
+ * up to 16 constructed objects.  When a cache allocates an object
+ * from the pool, it calls the object's constructor and places it into
+ * a cache group.  When a cache group frees an object back to the pool,
+ * it first calls the object's destructor.  This allows the object to
+ * persist in constructed form while freed to the cache.
+ *
+ * Multiple caches may exist for each pool.  This allows a single
+ * object type to have multiple constructed forms.  The pool references
+ * each cache, so that when a pool is drained by the pagedaemon, it can
+ * drain each individual cache as well.  Each time a cache is drained,
+ * the most idle cache group is freed to the pool in its entirety.
+ *
+ * Pool caches are laid on top of pools.  By layering them, we can avoid
+ * the complexity of cache management for pools which would not benefit
+ * from it.
+ */
 
+/* The cache group pool. */
+static struct pool pcgpool;
+
+/* The pool cache group. */
+#define        PCG_NOBJECTS            16
+struct pool_cache_group {
+       TAILQ_ENTRY(pool_cache_group)
+               pcg_list;       /* link in the pool cache's group list */
+       u_int   pcg_avail;      /* # available objects */
+                               /* pointers to the objects */
+       void    *pcg_objects[PCG_NOBJECTS];
+};
+
+static void    pool_cache_reclaim(struct pool_cache *);
 
 static int     pool_catchup(struct pool *);
 static void    pool_prime_page(struct pool *, caddr_t);
@@ -384,7 +422,7 @@
                size = sizeof(struct pool_item);
 
        size = ALIGN(size);
-       if (size >= pagesz)
+       if (size > pagesz)
                panic("pool_init: pool item size (%lu) too large",
                      (u_long)size);
 
@@ -392,6 +430,7 @@
         * Initialize the pool structure.
         */
        TAILQ_INIT(&pp->pr_pagelist);
+       TAILQ_INIT(&pp->pr_cachelist);
        pp->pr_curpage = NULL;
        pp->pr_npages = 0;
        pp->pr_minitems = 0;
@@ -447,6 +486,7 @@
         */
        pp->pr_itemoffset = ioff = ioff % align;
        pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
+       KASSERT(pp->pr_itemsperpage != 0);
 
        /*
         * Use the slack between the chunks and the page header
@@ -479,12 +519,15 @@
        simple_lock_init(&pp->pr_slock);
 
        /*
-        * Initialize private page header pool if we haven't done so yet.
+        * Initialize private page header pool and cache magazine pool if we
+        * haven't done so yet.
         * XXX LOCKING.
         */
        if (phpool.pr_size == 0) {
                pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
-                         0, "phpool", 0, 0, 0, 0);
+                   0, "phpool", 0, 0, 0, 0);
+               pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
+                   0, "pcgpool", 0, 0, 0, 0);
        }
 
        /* Insert into the list of all pools. */
@@ -500,6 +543,11 @@
 pool_destroy(struct pool *pp)
 {
        struct pool_item_header *ph;
+       struct pool_cache *pc;
+
+       /* Destroy all caches for this pool. */
+       while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
+               pool_cache_destroy(pc);
 
 #ifdef DIAGNOSTIC
        if (pp->pr_nout != 0) {
@@ -762,10 +810,10 @@
 }
 
 /*
- * Return resource to the pool; must be called at appropriate spl level
+ * Internal version of pool_put().  Pool is already locked/entered.
  */
-void
-_pool_put(struct pool *pp, void *v, const char *file, long line)
+static void
+pool_do_put(struct pool *pp, void *v, const char *file, long line)
 {
        struct pool_item *pi = v;
        struct pool_item_header *ph;
@@ -774,9 +822,6 @@
 
        page = (caddr_t)((u_long)v & pp->pr_pagemask);
 
-       simple_lock(&pp->pr_slock);
-       pr_enter(pp, file, line);
-
 #ifdef DIAGNOSTIC
        if (__predict_false(pp->pr_nout == 0)) {
                printf("pool %s: putting with none out\n",
@@ -829,8 +874,6 @@
                pp->pr_flags &= ~PR_WANTED;
                if (ph->ph_nmissing == 0)
                        pp->pr_nidle++;
-               pr_leave(pp);
-               simple_unlock(&pp->pr_slock);
                wakeup((caddr_t)pp);
                return;
        }
@@ -894,10 +937,22 @@
                TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
                pp->pr_curpage = ph;
        }
+}
+
+/*
+ * Return resource to the pool; must be called at appropriate spl level
+ */
+void
+_pool_put(struct pool *pp, void *v, const char *file, long line)
+{
+
+       simple_lock(&pp->pr_slock);
+       pr_enter(pp, file, line);
+
+       pool_do_put(pp, v, file, line);
 
        pr_leave(pp);
        simple_unlock(&pp->pr_slock);
-
 }
 
 /*
@@ -1188,6 +1243,7 @@
 _pool_reclaim(struct pool *pp, const char *file, long line)
 {
        struct pool_item_header *ph, *phnext;
+       struct pool_cache *pc;
        struct timeval curtime;
        int s;
 
@@ -1198,6 +1254,13 @@
                return;
        pr_enter(pp, file, line);
 
+       /*
+        * Reclaim items from the pool's caches.
+        */
+       for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
+            pc = TAILQ_NEXT(pc, pc_poollist))
+               pool_cache_reclaim(pc);
+
        s = splclock();
        curtime = mono_time;
        splx(s);
@@ -1446,3 +1509,250 @@
        simple_unlock(&pp->pr_slock);
        return (r);
 }
+
+/*
+ * pool_cache_init:
+ *
+ *     Initialize a pool cache.
+ *
+ *     NOTE: If the pool must be protected from interrupts, we expect
+ *     to be called at the appropriate interrupt priority level.
+ */
+void
+pool_cache_init(struct pool_cache *pc, struct pool *pp,
+    int (*ctor)(void *, void *, int),
+    void (*dtor)(void *, void *),
+    void *arg)
+{
+
+       TAILQ_INIT(&pc->pc_grouplist);
+       simple_lock_init(&pc->pc_slock);
+
+       pc->pc_allocfrom = NULL;
+       pc->pc_freeto = NULL;
+       pc->pc_pool = pp;
+
+       pc->pc_ctor = ctor;
+       pc->pc_dtor = dtor;
+       pc->pc_arg  = arg;
+
+       simple_lock(&pp->pr_slock);
+       TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
+       simple_unlock(&pp->pr_slock);
+}
+
+/*
+ * pool_cache_destroy:
+ *
+ *     Destroy a pool cache.
+ */
+void
+pool_cache_destroy(struct pool_cache *pc)
+{
+       struct pool *pp = pc->pc_pool;
+
+       /* First, invalidate the entire cache. */
+       pool_cache_invalidate(pc);
+
+       /* ...and remove it from the pool's cache list. */
+       simple_lock(&pp->pr_slock);
+       TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
+       simple_unlock(&pp->pr_slock);
+}
+
+static __inline void *
+pcg_get(struct pool_cache_group *pcg)
+{
+       void *object;
+       u_int idx;
+
+       KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
+       idx = --pcg->pcg_avail;
+
+       KASSERT(pcg->pcg_objects[idx] != NULL);
+       object = pcg->pcg_objects[idx];
+       pcg->pcg_objects[idx] = NULL;
+
+       return (object);
+}
+
+static __inline void
+pcg_put(struct pool_cache_group *pcg, void *object)
+{
+       u_int idx;
+
+       KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
+       idx = pcg->pcg_avail++;
+
+       KASSERT(pcg->pcg_objects[idx] == NULL);
+       pcg->pcg_objects[idx] = object;
+}
+
+/*
+ * pool_cache_get:
+ *
+ *     Get an object from a pool cache.
+ */
+void *
+pool_cache_get(struct pool_cache *pc, int flags)
+{
+       struct pool_cache_group *pcg;
+       void *object;
+
+       simple_lock(&pc->pc_slock);
+
+       if ((pcg = pc->pc_allocfrom) == NULL) {
+               for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
+                    pcg = TAILQ_NEXT(pcg, pcg_list)) {
+                       if (pcg->pcg_avail != 0) {
+                               pc->pc_allocfrom = pcg;
+                               goto have_group;


