Source-Changes-HG archive
[src/yamt-pagecache]: src/sys remove pg->listq and uobj->memq
details: https://anonhg.NetBSD.org/src/rev/7beae76a96d1
branches: yamt-pagecache
changeset: 770819:7beae76a96d1
user: yamt <yamt@NetBSD.org>
date: Sun Nov 06 22:05:00 2011 +0000
description:
remove pg->listq and uobj->memq
diffstat:
sys/nfs/nfs_subs.c | 16 +++++++++--
sys/ufs/lfs/lfs_vnops.c | 70 +++++++++---------------------------------------
sys/uvm/uvm_aobj.c | 48 ++++----------------------------
sys/uvm/uvm_loan.c | 8 ++--
sys/uvm/uvm_object.c | 16 ++++++++--
sys/uvm/uvm_object.h | 7 ++--
sys/uvm/uvm_page.c | 30 +++++++--------------
sys/uvm/uvm_page.h | 19 ++++++-------
sys/uvm/uvm_pglist.c | 8 ++--
9 files changed, 75 insertions(+), 147 deletions(-)
diffs (truncated from 580 to 300 lines):
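For context, every conversion in this diff follows the same pattern: the per-object page list (uobj->memq, linked through pg->listq) is removed, and page iteration goes through the uvm_page_array batched-lookup API instead. Below is a minimal sketch of the new idiom, modeled directly on the nfs_subs.c hunk that follows; visit_page() is a hypothetical placeholder for the per-page work, and the final boolean argument is simply passed through as false everywhere in this diff:

	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t off;

	uvm_page_array_init(&a);
	off = 0;
	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off,
	    false)) != NULL) {
		/* per-page work goes here */
		visit_page(pg);
		uvm_page_array_advance(&a);
		/* remember where to resume the next batched lookup */
		off = pg->offset + PAGE_SIZE;
	}
	uvm_page_array_fini(&a);

The offset serves as an explicit resume key, which is what lets callers pick up a scan at a well-defined point instead of holding a cursor into a mutable list.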
diff -r f8f7ff8a1dab -r 7beae76a96d1 sys/nfs/nfs_subs.c
--- a/sys/nfs/nfs_subs.c Sun Nov 06 22:04:07 2011 +0000
+++ b/sys/nfs/nfs_subs.c Sun Nov 06 22:05:00 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: nfs_subs.c,v 1.221.2.1 2011/11/02 21:53:59 yamt Exp $ */
+/* $NetBSD: nfs_subs.c,v 1.221.2.2 2011/11/06 22:05:01 yamt Exp $ */
/*
* Copyright (c) 1989, 1993
@@ -70,7 +70,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.221.2.1 2011/11/02 21:53:59 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.221.2.2 2011/11/06 22:05:01 yamt Exp $");
#ifdef _KERNEL_OPT
#include "opt_nfs.h"
@@ -100,6 +100,7 @@
#include <sys/atomic.h>
#include <uvm/uvm.h>
+#include <uvm/uvm_page_array.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
@@ -1745,6 +1746,9 @@
rw_enter(&nmp->nm_writeverflock, RW_WRITER);
mutex_enter(&mntvnode_lock);
TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
+ struct uvm_page_array a;
+ voff_t off;
+
KASSERT(vp->v_mount == mp);
if (vp->v_type != VREG)
continue;
@@ -1756,9 +1760,15 @@
np = VTONFS(vp);
np->n_commitflags &=
~(NFS_COMMIT_PUSH_VALID | NFS_COMMIT_PUSHED_VALID);
- TAILQ_FOREACH(pg, &vp->v_uobj.memq, listq.queue) {
+ uvm_page_array_init(&a);
+ off = 0;
+ while ((pg = uvm_page_array_fill_and_peek(&a, &vp->v_uobj, off,
+ false)) != NULL) {
pg->flags &= ~PG_NEEDCOMMIT;
+ uvm_page_array_advance(&a);
+ off = pg->offset + PAGE_SIZE;
}
+ uvm_page_array_fini(&a);
mutex_exit(vp->v_interlock);
}
mutex_exit(&mntvnode_lock);
diff -r f8f7ff8a1dab -r 7beae76a96d1 sys/ufs/lfs/lfs_vnops.c
--- a/sys/ufs/lfs/lfs_vnops.c Sun Nov 06 22:04:07 2011 +0000
+++ b/sys/ufs/lfs/lfs_vnops.c Sun Nov 06 22:05:00 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: lfs_vnops.c,v 1.238.2.1 2011/11/02 21:54:00 yamt Exp $ */
+/* $NetBSD: lfs_vnops.c,v 1.238.2.2 2011/11/06 22:05:01 yamt Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -60,7 +60,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.238.2.1 2011/11/02 21:54:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.238.2.2 2011/11/06 22:05:01 yamt Exp $");
#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
@@ -1836,10 +1836,8 @@
off_t startoffset, off_t endoffset, off_t blkeof,
int flags, int checkfirst, struct vm_page **pgp)
{
- int by_list;
- struct vm_page *curpg = NULL; /* XXX: gcc */
struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
- off_t soff = 0; /* XXX: gcc */
+ off_t soff;
voff_t off;
int i;
int nonexistent;
@@ -1851,39 +1849,10 @@
ASSERT_MAYBE_SEGLOCK(fs);
top:
- by_list = (vp->v_uobj.uo_npages <=
- ((endoffset - startoffset) >> PAGE_SHIFT) *
- UVM_PAGE_TREE_PENALTY);
any_dirty = 0;
- if (by_list) {
- curpg = TAILQ_FIRST(&vp->v_uobj.memq);
- } else {
- soff = startoffset;
- }
- while (by_list || soff < MIN(blkeof, endoffset)) {
- if (by_list) {
- /*
- * Find the first page in a block. Skip
- * blocks outside our area of interest or beyond
- * the end of file.
- */
- KASSERT(curpg == NULL
- || (curpg->flags & PG_MARKER) == 0);
- if (pages_per_block > 1) {
- while (curpg &&
- ((curpg->offset & fs->lfs_bmask) ||
- curpg->offset >= vp->v_size ||
- curpg->offset >= endoffset)) {
- curpg = TAILQ_NEXT(curpg, listq.queue);
- KASSERT(curpg == NULL ||
- (curpg->flags & PG_MARKER) == 0);
- }
- }
- if (curpg == NULL)
- break;
- soff = curpg->offset;
- }
+ soff = startoffset;
+ while (soff < MIN(blkeof, endoffset)) {
/*
* Mark all pages in extended range busy; find out if any
@@ -1891,15 +1860,11 @@
*/
nonexistent = dirty = 0;
for (i = 0; i == 0 || i < pages_per_block; i++) {
- if (by_list && pages_per_block <= 1) {
- pgs[i] = pg = curpg;
- } else {
- off = soff + (i << PAGE_SHIFT);
- pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
- if (pg == NULL) {
- ++nonexistent;
- continue;
- }
+ off = soff + (i << PAGE_SHIFT);
+ pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
+ if (pg == NULL) {
+ ++nonexistent;
+ continue;
}
KASSERT(pg != NULL);
@@ -1936,11 +1901,7 @@
dirty += tdirty;
}
if (pages_per_block > 0 && nonexistent >= pages_per_block) {
- if (by_list) {
- curpg = TAILQ_NEXT(curpg, listq.queue);
- } else {
- soff += fs->lfs_bsize;
- }
+ soff += fs->lfs_bsize;
continue;
}
@@ -1981,11 +1942,7 @@
if (checkfirst && any_dirty)
break;
- if (by_list) {
- curpg = TAILQ_NEXT(curpg, listq.queue);
- } else {
- soff += MAX(PAGE_SIZE, fs->lfs_bsize);
- }
+ soff += MAX(PAGE_SIZE, fs->lfs_bsize);
}
return any_dirty;
@@ -2074,8 +2031,7 @@
* If there are no pages, don't do anything.
*/
if (vp->v_uobj.uo_npages == 0) {
- if (TAILQ_EMPTY(&vp->v_uobj.memq) &&
- (vp->v_iflag & VI_ONWORKLST) &&
+ if ((vp->v_iflag & VI_ONWORKLST) &&
LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
vp->v_iflag &= ~VI_WRMAPDIRTY;
vn_syncer_remove_from_worklist(vp);
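(The last lfs_vnops.c hunk drops the TAILQ_EMPTY(&vp->v_uobj.memq) test because, with the list gone, vp->v_uobj.uo_npages == 0 is the only emptiness check left; the two conditions were equivalent whenever the list and the page count were kept in sync.)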
diff -r f8f7ff8a1dab -r 7beae76a96d1 sys/uvm/uvm_aobj.c
--- a/sys/uvm/uvm_aobj.c Sun Nov 06 22:04:07 2011 +0000
+++ b/sys/uvm/uvm_aobj.c Sun Nov 06 22:05:00 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_aobj.c,v 1.116.2.2 2011/11/06 10:15:11 yamt Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.116.2.3 2011/11/06 22:05:00 yamt Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.2 2011/11/06 10:15:11 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.3 2011/11/06 22:05:00 yamt Exp $");
#include "opt_uvmhist.h"
@@ -147,7 +147,7 @@
*/
struct uvm_aobj {
- struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
+ struct uvm_object u_obj; /* has: lock, pgops, #pages, #refs */
pgoff_t u_pages; /* number of pages in entire object */
int u_flags; /* the flags (see uvm_aobj.h) */
int *u_swslots; /* array of offset->swapslot mappings */
@@ -664,16 +664,8 @@
uvm_page_array_init(&a);
mutex_enter(&uvm_pageqlock);
- while (/*CONSTCOND*/true) {
- pg = uvm_page_array_peek(&a);
- if (pg == NULL) {
- int error = uvm_page_array_fill(&a, uobj, 0, false);
- if (error != 0) {
- break;
- }
- pg = uvm_page_array_peek(&a);
- KASSERT(pg != NULL);
- }
+ while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, false))
+ != NULL) {
uvm_page_array_advance(&a);
pmap_page_protect(pg, VM_PROT_NONE);
if (pg->flags & PG_BUSY) {
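The hunk above folds the removed peek/fill/peek sequence into a single uvm_page_array_fill_and_peek() call. Reconstructed from those removed lines, the helper plausibly looks like the sketch below; the real implementation lives in uvm_page_array.c, which this truncated diff does not show, and the name of the final boolean parameter is a placeholder since its meaning is not visible here:

	struct vm_page *
	uvm_page_array_fill_and_peek(struct uvm_page_array *a,
	    struct uvm_object *uobj, voff_t off, bool flag)
	{
		struct vm_page *pg;

		pg = uvm_page_array_peek(a);
		if (pg == NULL) {
			/* refill the array with the next batch from 'off' */
			int error = uvm_page_array_fill(a, uobj, off, flag);

			if (error != 0)
				return NULL;
			pg = uvm_page_array_peek(a);
			KASSERT(pg != NULL);
		}
		return pg;
	}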
@@ -712,30 +704,12 @@
* or block.
* => if PGO_ALLPAGE is set, then all pages in the object are valid targets
* for flushing.
- * => NOTE: we rely on the fact that the object's memq is a TAILQ and
- * that new pages are inserted on the tail end of the list. thus,
- * we can make a complete pass through the object in one go by starting
- * at the head and working towards the tail (new pages are put in
- * front of us).
* => NOTE: we are allowed to lock the page queues, so the caller
* must not be holding the lock on them [e.g. pagedaemon had
* better not call us with the queues locked]
* => we return 0 unless we encountered some sort of I/O error
* XXXJRT currently never happens, as we never directly initiate
* XXXJRT I/O
- *
- * note on page traversal:
- * we can traverse the pages in an object either by going down the
- * linked list in "uobj->memq", or we can go over the address range
- * by page doing hash table lookups for each address. depending
- * on how many pages are in the object it may be cheaper to do one
- * or the other. we set "by_list" to true if we are using memq.
- * if the cost of a hash lookup was equal to the cost of the list
- * traversal we could compare the number of pages in the start->stop
- * range to the total number of pages in the object. however, it
- * seems that a hash table lookup is more expensive than the linked
- * list traversal, so we multiply the number of pages in the
- * start->stop range by a penalty which we define below.
*/
static int
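The comment block deleted above documented the old by_list heuristic, which the lfs_vnops.c hunk earlier in this diff computed as by_list = (uo_npages <= range_pages * UVM_PAGE_TREE_PENALTY). As a worked example, assuming a penalty factor of 4 (the actual constant is defined in uvm_page.h and not shown in this diff): flushing a 200-page range of an object caching 1000 pages gives 200 * 4 = 800 < 1000, so the old code would have chosen per-offset lookups, while a 400-page range gives 400 * 4 = 1600 >= 1000 and would have walked memq instead. With memq gone, batched tree lookup via uvm_page_array is the only traversal mode, and the heuristic disappears along with the comment.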
@@ -783,16 +757,8 @@
/* locked: uobj */
uvm_page_array_init(&a);
curoff = start;
- while (curoff < stop) {
- pg = uvm_page_array_peek(&a);
- if (pg == NULL) {
- int error = uvm_page_array_fill(&a, uobj, curoff,
- false);
- if (error != 0) {
- break;
- }
- pg = uvm_page_array_peek(&a);
- }
+ while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, false)) !=
+ NULL) {
if (pg->offset >= stop) {
break;
}
diff -r f8f7ff8a1dab -r 7beae76a96d1 sys/uvm/uvm_loan.c
--- a/sys/uvm/uvm_loan.c Sun Nov 06 22:04:07 2011 +0000
+++ b/sys/uvm/uvm_loan.c Sun Nov 06 22:05:00 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_loan.c,v 1.81.2.1 2011/11/02 21:54:01 yamt Exp $ */
+/* $NetBSD: uvm_loan.c,v 1.81.2.2 2011/11/06 22:05:00 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.1 2011/11/02 21:54:01 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.2 2011/11/06 22:05:00 yamt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -845,8 +845,8 @@
* first, get ahold of our single zero page.
*/