Source-Changes-HG archive


[src/chs-ubc2]: src/sys/miscfs/genfs yet another major rework of the generic ...



details:   https://anonhg.NetBSD.org/src/rev/3562b32b9aae
branches:  chs-ubc2
changeset: 471396:3562b32b9aae
user:      chs <chs%NetBSD.org@localhost>
date:      Sun Jul 11 05:56:38 1999 +0000

description:
yet another major rework of the generic getpages.
for getpages operations that allocate blocks, the block allocation is
now done after reading the pages.  for nested i/os, use b_resid rather
than b_bcount to track the amount left to go.  return values for
getpages/putpages are now unix errnos rather than VM_PAGER_*.
readahead is gone again for the moment.
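
As a quick illustration of two of the conventions described above, here is a
small self-contained C sketch (not the NetBSD code itself): a master buffer
whose remaining work is tracked by counting b_resid down while b_bcount stays
fixed, and getpages-style entry points that return plain errnos (0, EINVAL,
EBUSY) instead of VM_PAGER_OK / VM_PAGER_BAD / VM_PAGER_UNLOCK.  The struct
and helper names are made up for the example; only the accounting idea
mirrors the diff below.

#include <sys/types.h>
#include <errno.h>
#include <stddef.h>

/* simplified stand-in for struct buf; only the fields this sketch needs */
struct fakebuf {
        size_t  b_bcount;       /* total size of the master i/o, never changed */
        size_t  b_resid;        /* bytes still outstanding, counted down */
        int     b_error;
};

/* set up a master buffer covering 'bytes' bytes */
static void
master_init(struct fakebuf *mbp, size_t bytes)
{
        mbp->b_bcount = bytes;
        mbp->b_resid = bytes;   /* nested i/os subtract from this, not b_bcount */
        mbp->b_error = 0;
}

/* completion of one nested i/o of 'iobytes' bytes; returns 1 when all done */
static int
nested_iodone(struct fakebuf *mbp, size_t iobytes, int error)
{
        if (error)
                mbp->b_error = error;
        mbp->b_resid -= iobytes;
        return (mbp->b_resid == 0);
}

/* getpages-style returns: plain errnos rather than VM_PAGER_* codes */
static int
getpages_status(off_t offset, off_t filesize, int locked, int have_page)
{
        if (offset >= filesize)
                return EINVAL;                  /* was VM_PAGER_BAD */
        if (locked)
                return have_page ? 0 : EBUSY;   /* was VM_PAGER_OK / VM_PAGER_UNLOCK */
        return 0;                               /* was VM_PAGER_OK */
}

Counting b_resid down lets each nested completion decide locally whether the
whole master i/o has finished, which is what the
"mbp->b_resid = mbp->b_bcount = bytes" line in the diff sets up.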

diffstat:

 sys/miscfs/genfs/genfs_vnops.c |  381 +++++++++++++++++++++++-----------------
 1 files changed, 222 insertions(+), 159 deletions(-)

diffs (truncated from 572 to 300 lines):

diff -r 60a464dc8ca1 -r 3562b32b9aae sys/miscfs/genfs/genfs_vnops.c
--- a/sys/miscfs/genfs/genfs_vnops.c    Sun Jul 11 05:52:10 1999 +0000
+++ b/sys/miscfs/genfs/genfs_vnops.c    Sun Jul 11 05:56:38 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs_vnops.c,v 1.11.4.1 1999/07/04 01:44:43 chs Exp $ */
+/*     $NetBSD: genfs_vnops.c,v 1.11.4.2 1999/07/11 05:56:38 chs Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -402,18 +402,19 @@
                int a_flags;
        } */ *ap = v;
 
-       off_t offset, origoffset;
+       off_t offset, origoffset, startoffset;
        daddr_t lbn, blkno;
-       int s, i, error, npages, cidx, bsize, bshift, run;
-       int dev_bshift, dev_bsize;
+       int s, i, error, npages, run, cidx, pidx, pcount;
+       int bsize, bshift, dev_bshift, dev_bsize;
        int flags = ap->a_flags;
-       size_t bytes, iobytes, tailbytes;
+       size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
+       boolean_t sawhole = FALSE;
        char *kva;
        struct buf *bp, *mbp;
        struct vnode *vp = ap->a_vp;
        struct uvm_object *uobj = &vp->v_uvm.u_obj;
-       struct vm_page *pg, *pgs[16];  /* XXX 16 */
-       struct ucred *cred = curproc->p_ucred;
+       struct vm_page *pg, *pgs[16];                   /* XXX 16 */
+       struct ucred *cred = curproc->p_ucred;          /* XXX curproc */
        UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
 
 #ifdef DIAGNOSTIC
@@ -434,7 +435,9 @@
                if ((flags & PGO_LOCKED) == 0) {
                        simple_unlock(&uobj->vmobjlock);
                }
-               return VM_PAGER_BAD;
+               UVMHIST_LOG(ubchist, "off 0x%x past EOF 0x%x",
+                           (int)ap->a_offset, (int)vp->v_uvm.u_size,0,0);
+               return EINVAL;
        }
 
        /*
@@ -445,8 +448,7 @@
                uvn_findpages(uobj, ap->a_offset, ap->a_count, ap->a_m,
                              UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);
 
-               return ap->a_m[ap->a_centeridx] == NULL ?
-                       VM_PAGER_UNLOCK : VM_PAGER_OK;
+               return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
        }
 
        /* vnode is VOP_LOCKed, uobj is locked */
@@ -454,176 +456,161 @@
        error = 0;
 
        /*
-        * XXX do the findpages for our 1 page first,
-        * change asyncget to take the one page as an arg and
-        * pretend that its findpages found it.
-        */
-
-       /*
-        * kick off a big read first to get some readahead, then
-        * get the one page we wanted.
+        * find our center page and make some simple checks.
         */
 
-       if ((flags & PGO_OVERWRITE) == 0 &&
-           (ap->a_offset & (MAXBSIZE - 1)) == 0) {
-               /*
-                * XXX pretty sure unlocking here is wrong.
-                */
-               simple_unlock(&uobj->vmobjlock);
-               uvm_vnp_asyncget(vp, ap->a_offset, MAXBSIZE);
-               simple_lock(&uobj->vmobjlock);
-       }
+       origoffset = ap->a_offset + (ap->a_centeridx << PAGE_SHIFT);
+       pg = NULL;
+       npages = 1;
+       uvn_findpages(uobj, origoffset, &npages, &pg, 0);
 
        /*
-        * find the page we want.
+        * if PGO_OVERWRITE is set, don't bother reading the page.
+        * PGO_OVERWRITE also means that the caller guarantees
+        * that the page already has backing store allocated.
         */
 
-       origoffset = offset = ap->a_offset + (ap->a_centeridx << PAGE_SHIFT);
-       npages = 1;
-       pg = NULL;
-       uvn_findpages(uobj, offset, &npages, &pg, 0);
-       simple_unlock(&uobj->vmobjlock);
+       if (flags & PGO_OVERWRITE) {
+               UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
+
+               /* XXX for now, zero the page if we allocated it */
+               if (pg->flags & PG_FAKE) {
+                       uvm_pagezero(pg);
+               }
+
+               simple_unlock(&uobj->vmobjlock);
+               goto out;
+       }
 
        /*
         * if the page is already resident, just return it.
         */
 
        if ((pg->flags & PG_FAKE) == 0 &&
-           !((ap->a_access_type & VM_PROT_WRITE) && (pg->flags & PG_RDONLY))) {
-               ap->a_m[ap->a_centeridx] = pg;
-               return VM_PAGER_OK;
-       }
-       UVMHIST_LOG(ubchist, "pg %p flags 0x%x access_type 0x%x",
-                   pg, (int)pg->flags, (int)ap->a_access_type, 0);
+           !((ap->a_access_type & VM_PROT_WRITE) &&
+             (pg->flags & PG_RDONLY))) {
 
-       /*
-        * don't bother reading the page if we're just going to
-        * overwrite it.
-        */
-
-       if (flags & PGO_OVERWRITE) {
-               UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
-
-               /* XXX for now, zero the page */
-               if (pg->flags & PG_FAKE) {
-                       uvm_pagezero(pg);
-               }
-
-               goto out;
+               UVMHIST_LOG(ubchist, "returning cached pg %p", pg,0,0,0);
+               uvm_pageactivate(pg);
+               ap->a_m[ap->a_centeridx] = pg;
+               simple_unlock(&uobj->vmobjlock);
+               return 0;
        }
 
        /*
-        * ok, really read the desired page.
+        * the page wasn't resident and we're not overwriting,
+        * so we're going to have to do some i/o.
+        * expand the fault to cover at least 1 block.
         */
 
        bshift = vp->v_mount->mnt_fs_bshift;
        bsize = 1 << bshift;
        dev_bshift = vp->v_mount->mnt_dev_bshift;
        dev_bsize = 1 << dev_bshift;
-       bytes = min(*ap->a_count << PAGE_SHIFT,
-                   (vp->v_uvm.u_size - offset + dev_bsize - 1) &
+
+       startoffset = offset = origoffset & ~(bsize - 1);
+       cidx = (origoffset - offset) >> PAGE_SHIFT;
+       npages = max(*ap->a_count + cidx, bsize >> PAGE_SHIFT);
+
+       if (npages == 1) {
+               pgs[0] = pg;
+       } else {
+               int n = npages;
+               memset(pgs, 0, sizeof(pgs));
+               pgs[cidx] = PGO_DONTCARE;
+               uvn_findpages(uobj, offset, &n, pgs, 0);
+               pgs[cidx] = pg;
+       }
+       simple_unlock(&uobj->vmobjlock);
+
+       /*
+        * read the desired page(s).
+        */
+
+       totalbytes = npages << PAGE_SHIFT;
+       bytes = min(totalbytes, (vp->v_uvm.u_size - offset + dev_bsize - 1) &
                    ~(dev_bsize - 1));
-       tailbytes = (*ap->a_count << PAGE_SHIFT) - bytes;
+       tailbytes = totalbytes - bytes;
+       skipbytes = 0;
 
-       kva = (void *)uvm_pagermapin(&pg, 1, M_WAITOK);
+       kva = (void *)uvm_pagermapin(pgs, npages, M_WAITOK);
 
        s = splbio();
        mbp = pool_get(&bufpool, PR_WAITOK);
        splx(s);
        mbp->b_bufsize = bytes;
        mbp->b_data = (void *)kva;
-       mbp->b_bcount = bytes;
+       mbp->b_resid = mbp->b_bcount = bytes;
        mbp->b_flags = B_BUSY|B_READ| (flags & PGO_SYNCIO ? 0 : B_CALL);
        mbp->b_iodone = uvm_aio_biodone;
        mbp->b_vp = vp;
 
        bp = NULL;
        for (; bytes > 0; offset += iobytes, bytes -= iobytes) {
-               lbn = offset >> bshift;
+
+               /*
+                * skip pages which don't need to be read.
+                */
+
+               pidx = (offset - startoffset) >> PAGE_SHIFT;
+               while ((pgs[pidx]->flags & PG_FAKE) == 0) {
+                       size_t b;
+
+                       if (offset & (PAGE_SIZE - 1)) {
+                               panic("genfs_getpages: skipping from middle "
+                                     "of page");
+                       }
+
+                       b = min(PAGE_SIZE, bytes);
+                       offset += b;
+                       bytes -= b;
+                       skipbytes += b;
+                       pidx++;
+                       if (bytes == 0) {
+                               goto loopdone;
+                       }
+               }
 
                /*
                 * bmap the file to find out the blkno to read from and
                 * how much we can read in one i/o.
                 */
 
+               lbn = offset >> bshift;
                error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
                if (error) {
                        UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
                                    lbn, error,0,0);
                        goto errout;
                }
+
+               /*
+                * see how many pages need to be read with this i/o.
+                * reduce the i/o size if necessary.
+                */
+
                iobytes = min(((lbn + 1 + run) << bshift) - offset, bytes);
+               if (offset + iobytes > round_page(offset)) {
+                       pcount = 1;
+                       while (pidx + pcount < npages &&
+                              pgs[pidx + pcount]->flags & PG_FAKE) {
+                               pcount++;
+                       }
+                       iobytes = min(iobytes, (pcount << PAGE_SHIFT) -
+                                     (offset - trunc_page(offset)));
+               }
+
+               /*
+                * if this block isn't allocated, zero it instead of reading it.
+                */
 
                if (blkno == (daddr_t)-1) {
                        UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
 
-                       /*
-                        * for read faults, we can skip the block allocation
-                        * by marking the page PG_RDONLY and PG_CLEAN.
-                        */
-
-                       if ((ap->a_access_type & VM_PROT_WRITE) == 0) {
-                               memset(kva + (offset - origoffset), 0,
-                                      min(1 << bshift, PAGE_SIZE -
-                                          (offset - origoffset)));
-
-                               pg->flags |= PG_CLEAN|PG_RDONLY;
-                               UVMHIST_LOG(ubchist, "setting PG_RDONLY",
-                                           0,0,0,0);
-                               continue;
-                       }
-
-                       /*
-                        * for write faults, we must allocate backing store
-                        * now and make sure the block is zeroed.
-                        */
-
-                       error = VOP_BALLOC(vp, offset, bsize, cred, NULL, 0);
-                       if (error) {
-                               UVMHIST_LOG(ubchist, "balloc lbn 0x%x -> %d",
-                                           lbn, error,0,0);
-                               goto errout;
-                       }
-
-                       error = VOP_BMAP(vp, lbn, NULL, &blkno, NULL);
-                       if (error) {
-                               UVMHIST_LOG(ubchist, "bmap2 lbn 0x%x -> %d",
-                                           lbn, error,0,0);
-                               goto errout;
-                       }
-
-                       simple_lock(&uobj->vmobjlock);
-                       npages = max(bsize >> PAGE_SHIFT, 1);
-                       if (npages > 1) {
-                               int idx = (offset - (lbn << bshift)) >> bshift;


