Source-Changes-HG archive


[src/trunk]: src/sys/ufs/lfs - Get rid of unused #ifdefs LFS_NO_PAGEMOVE and ...



details:   https://anonhg.NetBSD.org/src/rev/0d876fbabb41
branches:  trunk
changeset: 544088:0d876fbabb41
user:      perseant <perseant%NetBSD.org@localhost>
date:      Tue Mar 11 02:47:39 2003 +0000

description:
- Get rid of unused #ifdefs LFS_NO_PAGEMOVE and LFS_MALLOC_SUMMARY (both
  always true) and accompanying dead code.

- When constructing write clusters in lfs_writeseg, if the block we are
  about to add is itself a cluster from GOP_WRITE, don't nest a cluster
  inside a cluster; just write the GOP_WRITE cluster on its own (see the
  condensed sketch after this list).  This seems to give a slight
  performance gain on my test machine.

- Charge someone's rusage for writes on LFSes.  It's difficult to tell
  which process is the "right" one to charge, so just charge whoever
  triggered the write (a sketch of the idea follows after this list).
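
For readers skimming the change, here is a condensed sketch of the new
cluster-construction logic in lfs_writeseg(), paraphrased from the diff
below; the names (cbp, bp, cl, CHUNKSIZE, LFS_CL_MALLOC) are those of
the real code, but the copy/accounting step and error paths are elided:

	/* Condensed sketch, paraphrased from the lfs_writeseg() diff below. */
	while (i && cbp->b_bcount < CHUNKSIZE) {
		bp = *bpp;
		if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
			break;		/* no room left in this cluster */
		if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
			break;		/* cbp already wraps a GOP_WRITE cluster */

		if (bp->b_bcount > fs->lfs_bsize) {
			/* bp is itself a cluster built by GOP_WRITE */
			if (cbp->b_bcount > 0)
				break;	/* flush what we have; bp goes out alone */
			cbp->b_data = bp->b_data;	/* reuse bp's data, no copy */
		} else if (cbp->b_bcount == 0) {
			/* first ordinary block: allocate a copy buffer */
			p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE, LFS_NB_CLUSTER);
			cl->flags |= LFS_CL_MALLOC;
		}
		/* ... copy bp into the cluster, advance bpp, decrement i ... */
	}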

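The rusage accounting itself falls outside the truncated portion of the
diff below.  As a rough illustration only (a minimal sketch using the
standard 4.4BSD-style p_stats/ru_oublock fields, not code taken from
this commit), charging the triggering process for an output block looks
something like:

	/*
	 * Hypothetical sketch: charge the current process for one output
	 * block; the commit's actual placement and form in the LFS write
	 * path may differ.
	 */
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
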
diffstat:

 sys/ufs/lfs/lfs.h         |    8 +-
 sys/ufs/lfs/lfs_segment.c |  285 +++++++++++----------------------------------
 sys/ufs/lfs/lfs_subr.c    |   16 +--
 3 files changed, 77 insertions(+), 232 deletions(-)

diffs (truncated from 567 to 300 lines):

diff -r a3975f55c8a6 -r 0d876fbabb41 sys/ufs/lfs/lfs.h
--- a/sys/ufs/lfs/lfs.h Tue Mar 11 00:18:36 2003 +0000
+++ b/sys/ufs/lfs/lfs.h Tue Mar 11 02:47:39 2003 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: lfs.h,v 1.56 2003/03/08 22:14:31 perseant Exp $        */
+/*     $NetBSD: lfs.h,v 1.57 2003/03/11 02:47:39 perseant Exp $        */
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -79,16 +79,10 @@
 #define LFS_IFIND_RETRIES      16
 #define LFS_EAGAIN_FAIL                 /* markv fail with EAGAIN if ino is locked */
 #define LFS_DEBUG_RFW           /* print roll-forward debugging info */
-#define LFS_NO_PAGEMOVE                 /* Use malloc/copy to write clusters */
-#define LFS_AGGRESSIVE_SEGLOCK
 #define LFS_LOGLENGTH 1024
 
 /* #define DEBUG_LFS */                 /* Intensive debugging of LFS subsystem */
 
-#ifdef LFS_NO_PAGEMOVE
-# define LFS_MALLOC_SUMMARY
-#endif
-
 /*
  * Parameters and generic definitions
  */
diff -r a3975f55c8a6 -r 0d876fbabb41 sys/ufs/lfs/lfs_segment.c
--- a/sys/ufs/lfs/lfs_segment.c Tue Mar 11 00:18:36 2003 +0000
+++ b/sys/ufs/lfs/lfs_segment.c Tue Mar 11 02:47:39 2003 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: lfs_segment.c,v 1.108 2003/03/08 21:46:05 perseant Exp $       */
+/*     $NetBSD: lfs_segment.c,v 1.109 2003/03/11 02:47:40 perseant Exp $       */
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.108 2003/03/08 21:46:05 perseant Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.109 2003/03/11 02:47:40 perseant Exp $");
 
 #define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))
 
@@ -115,7 +115,6 @@
 static void lfs_super_aiodone(struct buf *);
 static void lfs_cluster_aiodone(struct buf *);
 static void lfs_cluster_callback(struct buf *);
-static struct buf **lookahead_pagemove(struct buf **, int, size_t *);
 
 /*
  * Determine if it's OK to start a partial in this segment, or if we need
@@ -1174,11 +1173,13 @@
                        bwrite(bp);
                } else {
 #ifdef DIAGNOSTIC
+# ifdef LFS_USE_B_INVAL
                        if ((bp->b_flags & (B_CALL|B_INVAL)) == B_INVAL) {
                                printf("lfs_gather: lbn %" PRId64 " is "
                                        "B_INVAL\n", bp->b_lblkno);
                                VOP_PRINT(bp->b_vp);
                        }
+# endif /* LFS_USE_B_INVAL */
                        if (!(bp->b_flags & B_DELWRI))
                                panic("lfs_gather: bp not B_DELWRI");
                        if (!(bp->b_flags & B_LOCKED)) {
@@ -1512,16 +1513,9 @@
 
        /* Get a new buffer for SEGSUM and enter it into the buffer list. */
        sp->cbpp = sp->bpp;
-#ifdef LFS_MALLOC_SUMMARY
        sbp = *sp->cbpp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
                                     fsbtodb(fs, fs->lfs_offset), fs->lfs_sumsize, LFS_NB_SUMMARY);
        sp->segsum = (*sp->cbpp)->b_data;
-#else
-       sbp = *sp->cbpp = getblk(VTOI(fs->lfs_ivnode)->i_devvp,
-                                fsbtodb(fs, fs->lfs_offset), NBPG, 0, 0);
-       /* memset(sbp->b_data, 0x5a, NBPG); */
-       sp->segsum = (*sp->cbpp)->b_data + NBPG - fs->lfs_sumsize;
-#endif
        memset(sp->segsum, 0, fs->lfs_sumsize);
        sp->start_bpp = ++sp->cbpp;
        fs->lfs_offset += btofsb(fs, fs->lfs_sumsize);
@@ -1541,10 +1535,6 @@
        sp->seg_bytes_left -= fs->lfs_sumsize;
        sp->sum_bytes_left = fs->lfs_sumsize - SEGSUM_SIZE(fs);
 
-#ifndef LFS_MALLOC_SUMMARY
-       LFS_LOCK_BUF(sbp);
-       brelse(sbp);
-#endif
        return (repeat);
 }
 
@@ -1601,31 +1591,6 @@
        }
 }
 
-static struct buf **
-lookahead_pagemove(struct buf **bpp, int nblocks, size_t *size)
-{
-       size_t maxsize;
-#ifndef LFS_NO_PAGEMOVE
-       struct buf *bp;
-#endif
-
-       maxsize = *size;
-       *size = 0;
-#ifdef LFS_NO_PAGEMOVE
-       return bpp;
-#else
-       while((bp = *bpp) != NULL && *size < maxsize && nblocks--) {
-               if(LFS_IS_MALLOC_BUF(bp))
-                       return bpp;
-               if(bp->b_bcount % NBPG)
-                       return bpp;
-               *size += bp->b_bcount;
-               ++bpp;
-       }
-       return NULL;
-#endif
-}
-
 #define BQUEUES 4 /* XXX */
 #define BQ_EMPTY 3 /* XXX */
 extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
@@ -1707,7 +1672,7 @@
 int
 lfs_writeseg(struct lfs *fs, struct segment *sp)
 {
-       struct buf **bpp, *bp, *cbp, *newbp, **pmlastbpp;
+       struct buf **bpp, *bp, *cbp, *newbp;
        SEGUSE *sup;
        SEGSUM *ssp;
        dev_t i_dev;
@@ -1722,9 +1687,6 @@
        struct vnode *devvp;
        char *p;
        struct vnode *vp;
-       struct inode *ip;
-       size_t pmsize;
-       int use_pagemove;
        int32_t *daddrp;        /* XXX ondisk32 */
        int changed;
 #if defined(DEBUG) && defined(LFS_PROPELLER)
@@ -1744,11 +1706,6 @@
        if ((nblocks = sp->cbpp - sp->bpp) == 1)
                return (0);
        
-#if 0
-       printf("lfs_writeseg: %d blocks at 0x%x\n", nblocks,
-               dbtofsb(fs, sp->bpp[0]->b_blkno));
-#endif
-
        i_dev = VTOI(fs->lfs_ivnode)->i_dev;
        devvp = VTOI(fs->lfs_ivnode)->i_devvp;
 
@@ -1930,6 +1887,7 @@
                /* Loop through gop_write cluster blocks */
                for (byteoffset = 0; byteoffset < (*bpp)->b_bcount;
                     byteoffset += fs->lfs_bsize) {
+#ifdef LFS_USE_B_INVAL
                        if (((*bpp)->b_flags & (B_CALL | B_INVAL)) ==
                            (B_CALL | B_INVAL)) {
                                if (copyin((caddr_t)(*bpp)->b_saveaddr +
@@ -1939,7 +1897,9 @@
                                                VTOI((*bpp)->b_vp)->i_number,
                                                (*bpp)->b_lblkno);
                                }
-                       } else {
+                       } else
+#endif /* LFS_USE_B_INVAL */
+                       {
                                memcpy(dp, (*bpp)->b_data + byteoffset,
                                       el_size);
                        }
@@ -1953,10 +1913,6 @@
                ssp->ss_serial = ++fs->lfs_serial;
                ssp->ss_ident  = fs->lfs_ident;
        }
-#ifndef LFS_MALLOC_SUMMARY
-       /* Set the summary block busy too */
-       (*(sp->bpp))->b_flags |= B_BUSY;
-#endif
        ssp->ss_datasum = cksum(datap, dp - datap);
        ssp->ss_sumsum =
            cksum(&ssp->ss_datasum, fs->lfs_sumsize - sizeof(ssp->ss_sumsum));
@@ -1973,11 +1929,12 @@
 
        /*
         * When we simply write the blocks we lose a rotation for every block
-        * written.  To avoid this problem, we use pagemove to cluster
-        * the buffers into a chunk and write the chunk.  CHUNKSIZE is the
-        * largest size I/O devices can handle.
+        * written.  To avoid this problem, we cluster the buffers into a
+        * chunk and write the chunk.  MAXPHYS is the largest size I/O
+        * devices can handle, use that for the size of the chunks.
         *
-        * XXX - right now MAXPHYS is only 64k; could it be larger?
+        * Blocks that are already clusters (from GOP_WRITE), however, we
+        * don't bother to copy into other clusters.
         */
 
 #define CHUNKSIZE MAXPHYS
@@ -1992,43 +1949,7 @@
                cbp->b_flags |= B_ASYNC | B_BUSY;
                cbp->b_bcount = 0;
 
-               /*
-                * Find out if we can use pagemove to build the cluster,
-                * or if we are stuck using malloc/copy.  If this is the
-                * first cluster, set the shift flag (see below).
-                */
-               pmsize = CHUNKSIZE;
-               use_pagemove = 0;
-               if(bpp == sp->bpp) {
-                       /* Summary blocks have to get special treatment */
-                       pmlastbpp = lookahead_pagemove(bpp + 1, i - 1, &pmsize);
-                       if(pmsize >= CHUNKSIZE - fs->lfs_sumsize ||
-                          pmlastbpp == NULL) {
-                               use_pagemove = 1;
-                               cl->flags |= LFS_CL_SHIFT;
-                       } else {
-                               /*
-                                * If we're not using pagemove, we have
-                                * to copy the summary down to the bottom
-                                * end of the block.
-                                */
-#ifndef LFS_MALLOC_SUMMARY
-                               memcpy((*bpp)->b_data, (*bpp)->b_data +
-                                      NBPG - fs->lfs_sumsize,
-                                      fs->lfs_sumsize);
-#endif /* LFS_MALLOC_SUMMARY */
-                       }
-               } else {
-                       pmlastbpp = lookahead_pagemove(bpp, i, &pmsize);
-                       if(pmsize >= CHUNKSIZE || pmlastbpp == NULL) {
-                               use_pagemove = 1;
-                       }
-               }
-               if(use_pagemove == 0) {
-                       cl->flags |= LFS_CL_MALLOC;
-                       cl->olddata = cbp->b_data;
-                       cbp->b_data = lfs_malloc(fs, CHUNKSIZE, LFS_NB_CLUSTER);
-               }
+               cl->olddata = cbp->b_data;
 #if defined(DEBUG) && defined(DIAGNOSTIC)
                if (bpp - sp->bpp > (fs->lfs_sumsize - SEGSUM_SIZE(fs))
                    / sizeof(int32_t)) {
@@ -2037,27 +1958,33 @@
                if (bpp - sp->bpp > fs->lfs_ssize / fs->lfs_fsize) {
                        panic("lfs_writeseg: theoretical bpp overwrite");
                }
-               if(dtosn(fs, dbtofsb(fs, (*bpp)->b_blkno + btodb((*bpp)->b_bcount - 1))) !=
-                  dtosn(fs, dbtofsb(fs, cbp->b_blkno))) {
-                       printf("block at %" PRId64 " (%" PRIu32 "), "
-                              "cbp at %" PRId64 " (%" PRIu32 ")\n",
-                               (*bpp)->b_blkno, dtosn(fs, dbtofsb(fs, (*bpp)->b_blkno)),
-                              cbp->b_blkno, dtosn(fs, dbtofsb(fs, cbp->b_blkno)));
-                       panic("lfs_writeseg: Segment overwrite");
-               }
 #endif
 
                /*
                 * Construct the cluster.
                 */
                ++fs->lfs_iocount;
-
-               for (p = cbp->b_data; i && cbp->b_bcount < CHUNKSIZE; i--) {
+               while(i && cbp->b_bcount < CHUNKSIZE) {
                        bp = *bpp;
 
                        if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
                                break;
+                       if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
+                               break;
 
+                       /* Clusters from GOP_WRITE are expedited */
+                       if (bp->b_bcount > fs->lfs_bsize) {
+                               if (cbp->b_bcount > 0)
+                                       /* Put in its own buffer */
+                                       break;
+                               else {
+                                       cbp->b_data = bp->b_data;
+                               }
+                       } else if (cbp->b_bcount == 0) {
+                               p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE,
+                                                            LFS_NB_CLUSTER);
+                               cl->flags |= LFS_CL_MALLOC;
+                       }
 #ifdef DIAGNOSTIC
                        if (dtosn(fs, dbtofsb(fs, bp->b_blkno +
                                              btodb(bp->b_bcount - 1))) !=
@@ -2069,6 +1996,7 @@
                        }
 #endif
 
+#ifdef LFS_USE_B_INVAL
                        /*
                         * Fake buffers from the cleaner are marked as B_INVAL.


