Source-Changes-HG archive


[src/trunk]: src/sys/ufs/ffs use pools for allocating most softdep datastruct...



details:   https://anonhg.NetBSD.org/src/rev/a0832425f58c
branches:  trunk
changeset: 514997:a0832425f58c
user:      chs <chs%NetBSD.org@localhost>
date:      Sat Sep 15 16:33:53 2001 +0000

description:
use pools for allocating most softdep data structures.  since we want to
allocate memory from kernel_map but some of the objects are freed from
interrupt context, we put objects on a queue instead of freeing them
immediately.  then in softdep_process_worklist() (which is called at
least once per second from the syncer), we process that queue and
free all the objects.  allocating from kernel_map instead of from kmem_map
allows us to have a much larger number of softdeps pending even in
configurations where kmem_map is relatively small.
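
Below is a minimal user-space sketch of the deferred-free scheme described
above. It is not the kernel code: a pthread mutex stands in for
splbio()/splx(), calloc()/free() stand in for pool_get()/pool_put(), and the
names defer_free() and drain_deferred() are illustrative only; the commit
itself enqueues with softdep_queuefree() and drains the queue at the top of
softdep_process_worklist().

/*
 * Sketch only: a mutex models the splbio()-protected deferred-free queue,
 * and the heap allocator models the softdep pools.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
        struct work *next;      /* plays the role of the wk_list linkage */
        int type;               /* plays the role of wk_type */
};

static struct work *tofree_head;                /* deferred-free queue */
static pthread_mutex_t tofree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called from a context that must not free directly: just enqueue. */
static void
defer_free(struct work *item)
{
        pthread_mutex_lock(&tofree_lock);       /* splbio() in the kernel */
        item->next = tofree_head;
        tofree_head = item;
        pthread_mutex_unlock(&tofree_lock);     /* splx(s) in the kernel */
}

/* Called periodically from a safe context: drain the queue and free all. */
static void
drain_deferred(void)
{
        struct work *item;

        pthread_mutex_lock(&tofree_lock);
        while ((item = tofree_head) != NULL) {
                tofree_head = item->next;
                free(item);                     /* pool_put() in the kernel */
        }
        pthread_mutex_unlock(&tofree_lock);
}

int
main(void)
{
        struct work *w = calloc(1, sizeof(*w)); /* pool_get() in the kernel */

        if (w == NULL)
                return 1;
        defer_free(w);          /* e.g. from "interrupt" context */
        drain_deferred();       /* e.g. from the once-a-second syncer pass */
        printf("queue drained\n");
        return 0;
}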

diffstat:

 sys/ufs/ffs/ffs_softdep.c |  257 ++++++++++++++++++++++++++++++++++-----------
 1 files changed, 194 insertions(+), 63 deletions(-)

diffs (truncated from 451 to 300 lines):

diff -r bb77ea95bb61 -r a0832425f58c sys/ufs/ffs/ffs_softdep.c
--- a/sys/ufs/ffs/ffs_softdep.c Sat Sep 15 16:28:15 2001 +0000
+++ b/sys/ufs/ffs/ffs_softdep.c Sat Sep 15 16:33:53 2001 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: ffs_softdep.c,v 1.15 2001/09/15 16:13:04 chs Exp $     */
+/*     $NetBSD: ffs_softdep.c,v 1.16 2001/09/15 16:33:53 chs Exp $     */
 
 /*
  * Copyright 1998 Marshall Kirk McKusick. All Rights Reserved.
@@ -74,25 +74,40 @@
 /*
  * Mapping of dependency structure types to malloc types.
  */
-#define        D_PAGEDEP       M_PAGEDEP
-#define        D_INODEDEP      M_INODEDEP
-#define        D_NEWBLK        M_NEWBLK
-#define        D_BMSAFEMAP     M_BMSAFEMAP
-#define        D_ALLOCDIRECT   M_ALLOCDIRECT
-#define        D_INDIRDEP      M_INDIRDEP
-#define        D_ALLOCINDIR    M_ALLOCINDIR
-#define        D_FREEFRAG      M_FREEFRAG
-#define        D_FREEBLKS      M_FREEBLKS
-#define        D_FREEFILE      M_FREEFILE
-#define        D_DIRADD        M_DIRADD
-#define        D_MKDIR         M_MKDIR
-#define        D_DIRREM        M_DIRREM
+#define        D_PAGEDEP       1
+#define        D_INODEDEP      2
+#define        D_NEWBLK        3
+#define        D_BMSAFEMAP     4
+#define        D_ALLOCDIRECT   5
+#define        D_INDIRDEP      6
+#define        D_ALLOCINDIR    7
+#define        D_FREEFRAG      8
+#define        D_FREEBLKS      9
+#define        D_FREEFILE      10
+#define        D_DIRADD        11
+#define        D_MKDIR         12
+#define        D_DIRREM        13
+#define D_LAST         13
 /*
- * Names of malloc types.
+ * Names of softdep types.
  */
-extern char *memname[];
-#define TYPENAME(type) ((unsigned)(type) < M_LAST ? memname[type] : "???")
-#define DtoM(type) (type)
+const char *softdep_typenames[] = {
+       "invalid",
+       "pagedep",
+       "inodedep",
+       "newblk",
+       "bmsafemap",
+       "allocdirect",
+       "indirdep",
+       "allocindir",
+       "freefrag",
+       "freeblks",
+       "diradd",
+       "mkdir",
+       "dirrem",
+};
+#define TYPENAME(type) \
+       ((unsigned)(type) < D_LAST ? softdep_typenames[type] : "???")
 /*
  * Finding the current process.
  */
@@ -353,6 +368,94 @@
 }
 
 /*
+ * Memory management.
+ */
+
+static struct pool pagedep_pool;
+static struct pool inodedep_pool;
+static struct pool newblk_pool;
+static struct pool bmsafemap_pool;
+static struct pool allocdirect_pool;
+static struct pool indirdep_pool;
+static struct pool allocindir_pool;
+static struct pool freefrag_pool;
+static struct pool freeblks_pool;
+static struct pool freefile_pool;
+static struct pool diradd_pool;
+static struct pool mkdir_pool;
+static struct pool dirrem_pool;
+
+static __inline void
+softdep_free(struct worklist *item, int type)
+{
+       switch (type) {
+
+       case D_PAGEDEP:
+               pool_put(&pagedep_pool, item);
+               return;
+
+       case D_INODEDEP:
+               pool_put(&inodedep_pool, item);
+               return;
+
+       case D_BMSAFEMAP:
+               pool_put(&bmsafemap_pool, item);
+               return;
+
+       case D_ALLOCDIRECT:
+               pool_put(&allocdirect_pool, item);
+               return;
+
+       case D_INDIRDEP:
+               pool_put(&indirdep_pool, item);
+               return;
+
+       case D_ALLOCINDIR:
+               pool_put(&allocindir_pool, item);
+               return;
+
+       case D_FREEFRAG:
+               pool_put(&freefrag_pool, item);
+               return;
+
+       case D_FREEBLKS:
+               pool_put(&freeblks_pool, item);
+               return;
+
+       case D_FREEFILE:
+               pool_put(&freefile_pool, item);
+               return;
+
+       case D_DIRADD:
+               pool_put(&diradd_pool, item);
+               return;
+
+       case D_MKDIR:
+               pool_put(&mkdir_pool, item);
+               return;
+
+       case D_DIRREM:
+               pool_put(&dirrem_pool, item);
+               return;
+
+       }
+       panic("softdep_free: unknown type %d", type);
+}
+
+struct workhead softdep_tofree;
+
+static __inline void
+softdep_queuefree(struct worklist *item)
+{
+       int s;
+
+       s = splbio();
+       LIST_INSERT_HEAD(&softdep_tofree, item , wk_list);
+       splx(s);
+}
+
+
+/*
  * Worklist queue management.
  * These routines require that the lock be held.
  */
@@ -365,7 +468,7 @@
        (item)->wk_state &= ~ONWORKLIST;        \
        LIST_REMOVE(item, wk_list);             \
 } while (0)
-#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))
+#define WORKITEM_FREE(item, type) softdep_queuefree(item, type)
 
 #else /* DEBUG */
 static void worklist_insert __P((struct workhead *, struct worklist *));
@@ -411,9 +514,7 @@
 
        if (item->wk_state & ONWORKLIST)
                panic("workitem_free: still on list");
-       if (item->wk_type != type)
-               panic("workitem_free: type mismatch");
-       FREE(item, DtoM(type));
+       softdep_queuefree(item);
 }
 #endif /* DEBUG */
 
@@ -497,6 +598,18 @@
        struct worklist *wk;
        struct fs *matchfs;
        int matchcnt;
+       int s;
+
+       /*
+        * First process any items on the delay-free queue.
+        */
+       s = splbio();
+       while ((wk = LIST_FIRST(&softdep_tofree)) != NULL) {
+               LIST_REMOVE(wk, wk_list);
+               softdep_free(wk, wk->wk_type);
+       }
+       splx(s);
+
        /*
         * Record the process identifier of our caller so that we can give
         * this process preferential treatment in request_cleanup below.
@@ -752,8 +865,7 @@
                ACQUIRE_LOCK(&lk);
                goto top;
        }
-       MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
-               M_WAITOK);
+       pagedep = pool_get(&pagedep_pool, PR_WAITOK);
        bzero(pagedep, sizeof(struct pagedep));
        pagedep->pd_list.wk_type = D_PAGEDEP;
        pagedep->pd_mnt = mp;
@@ -829,8 +941,7 @@
                goto top;
        }
        num_inodedep += 1;
-       MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
-               M_INODEDEP, M_WAITOK);
+       inodedep = pool_get(&inodedep_pool, PR_WAITOK);
        inodedep->id_list.wk_type = D_INODEDEP;
        inodedep->id_fs = fs;
        inodedep->id_ino = inum;
@@ -891,8 +1002,7 @@
        }
        if (sema_get(&newblk_in_progress, 0) == 0)
                goto top;
-       MALLOC(newblk, struct newblk *, sizeof(struct newblk),
-               M_NEWBLK, M_WAITOK);
+       newblk = pool_get(&newblk_pool, PR_WAITOK);
        newblk->nb_state = 0;
        newblk->nb_fs = fs;
        newblk->nb_newblkno = newblkno;
@@ -928,6 +1038,46 @@
        for (i = 0; i < PCBPHASHSIZE; i++) {
                LIST_INIT(&pcbphashhead[i]);
        }
+
+       pool_init(&pagedep_pool, sizeof(struct pagedep), 0, 0, 0,
+           "pagedeppl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_PAGEDEP);
+       pool_init(&inodedep_pool, sizeof(struct inodedep), 0, 0, 0,
+           "inodedeppl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_INODEDEP);
+       pool_init(&newblk_pool, sizeof(struct newblk), 0, 0, 0,
+           "newblkpl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_NEWBLK);
+       pool_init(&bmsafemap_pool, sizeof(struct bmsafemap), 0, 0, 0,
+           "bmsafemappl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_BMSAFEMAP);
+       pool_init(&allocdirect_pool, sizeof(struct allocdirect), 0, 0, 0,
+           "allocdirectpl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_ALLOCDIRECT);
+       pool_init(&indirdep_pool, sizeof(struct indirdep), 0, 0, 0,
+           "indirdeppl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_INDIRDEP);
+       pool_init(&allocindir_pool, sizeof(struct allocindir), 0, 0, 0,
+           "allocindirpl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_ALLOCINDIR);
+       pool_init(&freefrag_pool, sizeof(struct freefrag), 0, 0, 0,
+           "freefragpl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_FREEFRAG);
+       pool_init(&freeblks_pool, sizeof(struct freeblks), 0, 0, 0,
+           "freeblkspl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_FREEBLKS);
+       pool_init(&freefile_pool, sizeof(struct freefile), 0, 0, 0,
+           "freefilepl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_FREEFILE);
+       pool_init(&diradd_pool, sizeof(struct diradd), 0, 0, 0,
+           "diraddpl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_DIRADD);
+       pool_init(&mkdir_pool, sizeof(struct mkdir), 0, 0, 0,
+           "mkdirpl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_MKDIR);
+       pool_init(&dirrem_pool, sizeof(struct dirrem), 0, 0, 0,
+           "dirrempl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
+           M_DIRREM);
 }
 
 /*
@@ -1142,8 +1292,7 @@
                if (wk->wk_type == D_BMSAFEMAP)
                        return (WK_BMSAFEMAP(wk));
        FREE_LOCK(&lk);
-       MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
-               M_BMSAFEMAP, M_WAITOK);
+       bmsafemap = pool_get(&bmsafemap_pool, PR_WAITOK);
        bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
        bmsafemap->sm_list.wk_state = 0;
        bmsafemap->sm_buf = bp;
@@ -1202,8 +1351,7 @@
        struct pagedep *pagedep;
        struct newblk *newblk;
 
-       MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
-               M_ALLOCDIRECT, M_WAITOK);
+       adp = pool_get(&allocdirect_pool, PR_WAITOK);
        bzero(adp, sizeof(struct allocdirect));
        adp->ad_list.wk_type = D_ALLOCDIRECT;
        adp->ad_lbn = lbn;
@@ -1234,7 +1382,7 @@
                LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);


