Source-Changes-HG archive


[src/yamt-pagecache]: src/sys byebye PG_HOLE as it turned out to be unnecessary.



details:   https://anonhg.NetBSD.org/src/rev/6a7d4128c7f3
branches:  yamt-pagecache
changeset: 770874:6a7d4128c7f3
user:      yamt <yamt%NetBSD.org@localhost>
date:      Fri Feb 17 08:18:56 2012 +0000

description:
byebye PG_HOLE as it turned out to be unnecessary.
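
In rough terms, the branch previously kept a separate PG_HOLE flag for pages whose
backing blocks were not yet allocated; after this change such "hole" pages are simply
marked PG_RDONLY, so a write access to them still faults and the filesystem gets a
chance to allocate the backing block first.  A minimal illustrative sketch of the
resulting check follows (not part of the commit; the helper name is hypothetical,
the flag value matches uvm_page.h):

    /*
     * Sketch only -- not part of this commit.
     * PG_RDONLY matches the definition in sys/uvm/uvm_page.h;
     * the helper name below is hypothetical.
     */
    #define PG_RDONLY       0x0080          /* page must be mapped read-only */

    /*
     * A page over an unallocated ("hole") block is now marked PG_RDONLY
     * like any other read-only page, so a write access to it faults
     * again instead of needing a dedicated PG_HOLE bit.
     */
    static int
    page_write_must_fault(int pgflags)
    {
            return (pgflags & PG_RDONLY) != 0;      /* formerly PG_RDONLY|PG_HOLE */
    }
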

diffstat:

 sys/miscfs/genfs/genfs_io.c |  22 +++++++++++-----------
 sys/ufs/ufs/ufs_inode.c     |   8 ++++----
 sys/uvm/uvm_bio.c           |   8 ++++----
 sys/uvm/uvm_page.c          |   6 +++---
 sys/uvm/uvm_page.h          |  10 ++++------
 sys/uvm/uvm_vnode.c         |  10 +++++-----
 6 files changed, 31 insertions(+), 33 deletions(-)

diffs (263 lines):

diff -r 9b7b623e3833 -r 6a7d4128c7f3 sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c       Fri Feb 17 08:16:55 2012 +0000
+++ b/sys/miscfs/genfs/genfs_io.c       Fri Feb 17 08:18:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs_io.c,v 1.53.2.12 2012/02/05 08:23:41 yamt Exp $  */
+/*     $NetBSD: genfs_io.c,v 1.53.2.13 2012/02/17 08:18:57 yamt Exp $  */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.12 2012/02/05 08:23:41 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.13 2012/02/17 08:18:57 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -377,7 +377,7 @@
                         * it's caller's responsibility to allocate blocks
                         * beforehand for the overwrite case.
                         */
-                       pg->flags &= ~(PG_RDONLY|PG_HOLE);
+                       pg->flags &= ~PG_RDONLY;
                        /*
                         * mark the page DIRTY.
                         * otherwise another thread can do putpages and pull
@@ -399,7 +399,7 @@
                struct vm_page *pg = pgs[ridx + i];
 
                if ((pg->flags & PG_FAKE) ||
-                   (memwrite && (pg->flags & (PG_RDONLY|PG_HOLE)) != 0)) {
+                   (memwrite && (pg->flags & PG_RDONLY) != 0)) {
                        break;
                }
        }
@@ -527,7 +527,7 @@
                        size_t b;
 
                        KASSERT((offset & (PAGE_SIZE - 1)) == 0);
-                       if ((pgs[pidx]->flags & PG_HOLE)) {
+                       if ((pgs[pidx]->flags & PG_RDONLY)) {
                                sawhole = true;
                        }
                        b = MIN(PAGE_SIZE, bytes);
@@ -581,13 +581,13 @@
                /*
                 * if this block isn't allocated, zero it instead of
                 * reading it.  unless we are going to allocate blocks,
-                * mark the pages we zeroed PG_HOLE.
+                * mark the pages we zeroed PG_RDONLY.
                 */
 
                if (blkno == (daddr_t)-1) {
                        int holepages = (round_page(offset + iobytes) -
                            trunc_page(offset)) >> PAGE_SHIFT;
-                       UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
+                       UVMHIST_LOG(ubchist, "lbn 0x%x -> RDONLY", lbn,0,0,0);
 
                        sawhole = true;
                        memset((char *)kva + (offset - startoffset), 0,
@@ -597,7 +597,7 @@
                        if (!blockalloc) {
                                mutex_enter(uobj->vmobjlock);
                                for (i = 0; i < holepages; i++) {
-                                       pgs[pidx + i]->flags |= PG_HOLE;
+                                       pgs[pidx + i]->flags |= PG_RDONLY;
                                }
                                mutex_exit(uobj->vmobjlock);
                        }
@@ -650,7 +650,7 @@
 
        /*
         * if this we encountered a hole then we have to do a little more work.
-        * if blockalloc is false, we marked the page PG_HOLE so that future
+        * if blockalloc is false, we marked the page PG_RDONLY so that future
         * write accesses to the page will fault again.
         * if blockalloc is true, we must make sure that the backing store for
         * the page is completely allocated while the pages are locked.
@@ -669,7 +669,7 @@
                                if (pg == NULL) {
                                        continue;
                                }
-                               pg->flags &= ~PG_HOLE;
+                               pg->flags &= ~PG_RDONLY;
                                uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
                                UVMHIST_LOG(ubchist, "mark dirty pg %p",
                                    pg,0,0,0);
@@ -734,7 +734,7 @@
                        KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
                        pg->flags &= ~PG_FAKE;
                }
-               KASSERT(!blockalloc || (pg->flags & PG_HOLE) == 0);
+               KASSERT(!blockalloc || (pg->flags & PG_RDONLY) == 0);
                if (i < ridx || i >= ridx + orignmempages || async) {
                        UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
                            pg, pg->offset,0,0);
diff -r 9b7b623e3833 -r 6a7d4128c7f3 sys/ufs/ufs/ufs_inode.c
--- a/sys/ufs/ufs/ufs_inode.c   Fri Feb 17 08:16:55 2012 +0000
+++ b/sys/ufs/ufs/ufs_inode.c   Fri Feb 17 08:18:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: ufs_inode.c,v 1.88.2.1 2011/11/02 21:54:00 yamt Exp $  */
+/*     $NetBSD: ufs_inode.c,v 1.88.2.2 2012/02/17 08:18:56 yamt Exp $  */
 
 /*
  * Copyright (c) 1991, 1993
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.88.2.1 2011/11/02 21:54:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.88.2.2 2012/02/17 08:18:56 yamt Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_ffs.h"
@@ -279,7 +279,7 @@
 
        /*
         * if the allocation succeeded, mark all the pages dirty
-        * and clear PG_HOLE on any pages that are now fully backed
+        * and clear PG_RDONLY on any pages that are now fully backed
         * by disk blocks.  if the allocation failed, we do not invalidate
         * the pages since they might have already existed and been dirty,
         * in which case we need to keep them around.  if we created the pages,
@@ -295,7 +295,7 @@
                if (!error) {
                        if (off <= pagestart + (i << PAGE_SHIFT) &&
                            pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
-                               pgs[i]->flags &= ~PG_HOLE;
+                               pgs[i]->flags &= ~PG_RDONLY;
                        }
                        uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
                }
diff -r 9b7b623e3833 -r 6a7d4128c7f3 sys/uvm/uvm_bio.c
--- a/sys/uvm/uvm_bio.c Fri Feb 17 08:16:55 2012 +0000
+++ b/sys/uvm/uvm_bio.c Fri Feb 17 08:18:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_bio.c,v 1.79.2.1 2011/11/02 21:54:00 yamt Exp $    */
+/*     $NetBSD: uvm_bio.c,v 1.79.2.2 2012/02/17 08:18:57 yamt Exp $    */
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.79.2.1 2011/11/02 21:54:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.79.2.2 2012/02/17 08:18:57 yamt Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
@@ -265,13 +265,13 @@
 
        /*
         * Note that a page whose backing store is partially allocated
-        * is marked as PG_HOLE.
+        * is marked as PG_RDONLY.
         *
         * it's a responsibility of ubc_alloc's caller to allocate backing
         * blocks before writing to the window.
         */
 
-       KASSERT((pg->flags & PG_HOLE) == 0 ||
+       KASSERT((pg->flags & PG_RDONLY) == 0 ||
            (access_type & VM_PROT_WRITE) == 0 ||
            pg->offset < umap->writeoff ||
            pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
diff -r 9b7b623e3833 -r 6a7d4128c7f3 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c        Fri Feb 17 08:16:55 2012 +0000
+++ b/sys/uvm/uvm_page.c        Fri Feb 17 08:18:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page.c,v 1.178.2.12 2012/01/04 16:31:17 yamt Exp $ */
+/*     $NetBSD: uvm_page.c,v 1.178.2.13 2012/02/17 08:18:57 yamt Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.12 2012/01/04 16:31:17 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.13 2012/02/17 08:18:57 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -2110,7 +2110,7 @@
 
        KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
        KASSERT(uobj != NULL || mutex_owned(pg->uanon->an_lock));
-       if ((pg->flags & (PG_RDONLY|PG_HOLE)) != 0) {
+       if ((pg->flags & PG_RDONLY) != 0) {
                return true;
        }
        if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
diff -r 9b7b623e3833 -r 6a7d4128c7f3 sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h        Fri Feb 17 08:16:55 2012 +0000
+++ b/sys/uvm/uvm_page.h        Fri Feb 17 08:18:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page.h,v 1.73.2.8 2011/11/30 14:33:47 yamt Exp $   */
+/*     $NetBSD: uvm_page.h,v 1.73.2.9 2012/02/17 08:18:57 yamt Exp $   */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -160,9 +160,8 @@
  * is only set when the page is on no queues, and is cleared when the page
  * is placed on the free list.
  *
- * PG_RDONLY and PG_HOLE acts like a "read-only count".  ie. either of
- * them is set, the page should not be mapped writably.  typically
- * they are set by pgo_get to inform the fault handler.
+ * PG_RDONLY is used to indicate that the page should not be mapped writably.
+ * typically they are set by pgo_get to inform the fault handler.
  *
  * if you want to renumber PG_CLEAN and PG_DIRTY, check __CTASSERTs in
  * uvm_page_status.c first.
@@ -178,7 +177,6 @@
 #define        PG_RDONLY       0x0080          /* page must be mapped read-only */
 #define        PG_ZERO         0x0100          /* page is pre-zero'd */
 #define        PG_TABLED       0x0200          /* page is in VP table  */
-#define        PG_HOLE         0x0400          /* XXX */
 
 #define PG_PAGER1      0x1000          /* pager-specific flag */
 #define PG_PAGER2      0x2000          /* pager-specific flag */
@@ -186,7 +184,7 @@
 #define        UVM_PGFLAGBITS \
        "\20\1CLEAN\2DIRTY\3BUSY\4WANTED" \
        "\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
-       "\11ZERO\12TABLED\13HOLE" \
+       "\11ZERO\12TABLED" \
        "\15PAGER1\16PAGER2"
 
 #define PQ_FREE                0x0001          /* page is on free list */
diff -r 9b7b623e3833 -r 6a7d4128c7f3 sys/uvm/uvm_vnode.c
--- a/sys/uvm/uvm_vnode.c       Fri Feb 17 08:16:55 2012 +0000
+++ b/sys/uvm/uvm_vnode.c       Fri Feb 17 08:18:56 2012 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_vnode.c,v 1.97.2.4 2012/01/18 02:09:06 yamt Exp $  */
+/*     $NetBSD: uvm_vnode.c,v 1.97.2.5 2012/02/17 08:18:57 yamt Exp $  */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.4 2012/01/18 02:09:06 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.5 2012/02/17 08:18:57 yamt Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -318,9 +318,9 @@
                        continue;
                }
 
-               /* skip PG_RDONLY and PG_HOLE pages if requested */
-               if ((flags & UFP_NORDONLY) &&
-                   (pg->flags & (PG_RDONLY|PG_HOLE))) {
+               /* skip PG_RDONLY pages if requested */
+               if ((flags & UFP_NORDONLY) != 0 &&
+                   (pg->flags & PG_RDONLY) != 0) {
                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                        goto skip;
                }


