Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/chs-ubc2]: src/sys/miscfs/genfs genfs_getpages() now handles:



details:   https://anonhg.NetBSD.org/src/rev/9da11258d415
branches:  chs-ubc2
changeset: 471402:9da11258d415
user:      chs <chs%NetBSD.org@localhost>
date:      Sat Jul 31 18:45:33 1999 +0000

description:
genfs_getpages() now handles:
- faults on offsets past the nominal EOF during extending writes.
- returning multiple pages in the !PGO_LOCKED case if multiple pages
  are requested.
- using the new VOP_BALLOC() interface for allocating getpages with
  blocksize < pagesize.
genfs_putpages() now handles:
- writing pages which do not have full backing store allocated.

diffstat:

 sys/miscfs/genfs/genfs_vnops.c |  306 +++++++++++++++++++++++-----------------
 1 files changed, 178 insertions(+), 128 deletions(-)

diffs (truncated from 559 to 300 lines):

diff -r 2d23169f77ec -r 9da11258d415 sys/miscfs/genfs/genfs_vnops.c
--- a/sys/miscfs/genfs/genfs_vnops.c    Sat Jul 31 18:40:02 1999 +0000
+++ b/sys/miscfs/genfs/genfs_vnops.c    Sat Jul 31 18:45:33 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs_vnops.c,v 1.11.4.3 1999/07/12 02:40:30 chs Exp $ */
+/*     $NetBSD: genfs_vnops.c,v 1.11.4.4 1999/07/31 18:45:33 chs Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -381,12 +381,12 @@
 #endif /* NFSSERVER */
 }
 
-
 /*
  * generic VM getpages routine.
  * Return PG_BUSY pages for the given range,
  * reading from backing store if necessary.
  */
+
 int
 genfs_getpages(v)
        void *v;
@@ -402,9 +402,9 @@
                int a_flags;
        } */ *ap = v;
 
-       off_t offset, origoffset, startoffset;
+       off_t eof, offset, origoffset, startoffset, endoffset;
        daddr_t lbn, blkno;
-       int s, i, error, npages, run, cidx, pidx, pcount;
+       int s, i, error, npages, npgs, run, ridx, pidx, pcount;
        int bsize, bshift, dev_bshift, dev_bsize;
        int flags = ap->a_flags;
        size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
@@ -413,7 +413,7 @@
        struct buf *bp, *mbp;
        struct vnode *vp = ap->a_vp;
        struct uvm_object *uobj = &vp->v_uvm.u_obj;
-       struct vm_page *pg, *pgs[16];                   /* XXX 16 */
+       struct vm_page *pgs[16];                        /* XXX 16 */
        struct ucred *cred = curproc->p_ucred;          /* XXX curproc */
        UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
 
@@ -425,18 +425,22 @@
        if (ap->a_offset & (PAGE_SIZE - 1)) {
                panic("genfs_getpages: offset 0x%x", (int)ap->a_offset);
        }
+       if (*ap->a_count < 0) {
+               panic("genfs_getpages: count %d < 0", *ap->a_count);
+       }
 #endif
 
        /*
         * Bounds-check the request.
         */
 
-       if (ap->a_offset >= vp->v_uvm.u_size) {
+       eof = vp->v_uvm.u_size;
+       if (ap->a_offset >= eof) {
                if ((flags & PGO_LOCKED) == 0) {
                        simple_unlock(&uobj->vmobjlock);
                }
-               UVMHIST_LOG(ubchist, "off 0x%x past EOF 0x%x",
-                           (int)ap->a_offset, (int)vp->v_uvm.u_size,0,0);
+               UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
+                           (int)ap->a_offset, *ap->a_count, (int)eof,0);
                return EINVAL;
        }
 
@@ -451,56 +455,17 @@
                return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
        }
 
+       if (ap->a_offset + ((*ap->a_count - 1) << PAGE_SHIFT) >= eof) {
+               panic("genfs_getpages: non LOCKED req past EOF vp %p", vp);
+       }
+
        /* vnode is VOP_LOCKed, uobj is locked */
 
        error = 0;
 
        /*
-        * find our center page and make some simple checks.
-        */
-
-       origoffset = ap->a_offset + (ap->a_centeridx << PAGE_SHIFT);
-       pg = NULL;
-       npages = 1;
-       uvn_findpages(uobj, origoffset, &npages, &pg, 0);
-
-       /*
-        * if PGO_OVERWRITE is set, don't bother reading the page.
-        * PGO_OVERWRITE also means that the caller guarantees
-        * that the page already has backing store allocated.
-        */
-
-       if (flags & PGO_OVERWRITE) {
-               UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
-
-               /* XXX for now, zero the page if we allocated it */
-               if (pg->flags & PG_FAKE) {
-                       uvm_pagezero(pg);
-               }
-
-               simple_unlock(&uobj->vmobjlock);
-               pgs[0] = pg;
-               goto out;
-       }
-
-       /*
-        * if the page is already resident, just return it.
-        */
-
-       if ((pg->flags & PG_FAKE) == 0 &&
-           !((ap->a_access_type & VM_PROT_WRITE) &&
-             (pg->flags & PG_RDONLY))) {
-               UVMHIST_LOG(ubchist, "returning cached pg %p", pg,0,0,0);
-
-               simple_unlock(&uobj->vmobjlock);
-               pgs[0] = pg;
-               goto out;
-       }
-
-       /*
-        * the page wasn't resident and we're not overwriting,
-        * so we're going to have to do some i/o.
-        * expand the fault to cover at least 1 block.
+        * find the requested pages and make some simple checks.
+        * leave space in the page array for a whole block.
         */
 
        bshift = vp->v_mount->mnt_fs_bshift;
@@ -508,18 +473,72 @@
        dev_bshift = vp->v_mount->mnt_dev_bshift;
        dev_bsize = 1 << dev_bshift;
 
-       startoffset = offset = origoffset & ~(bsize - 1);
-       cidx = (origoffset - offset) >> PAGE_SHIFT;
-       npages = max(*ap->a_count + cidx, bsize >> PAGE_SHIFT);
+       npages = *ap->a_count;
+       origoffset = ap->a_offset;
+       startoffset = origoffset & ~((off_t)bsize - 1);
+       endoffset = round_page((origoffset + (npages << PAGE_SHIFT)
+                               + bsize - 1) & ~((off_t)bsize - 1));
+       ridx = (origoffset - startoffset) >> PAGE_SHIFT;
+
+       memset(pgs, 0, sizeof(pgs));
+       uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], UFP_ALL);
+
+       /*
+        * if PGO_OVERWRITE is set, don't bother reading the pages.
+        * PGO_OVERWRITE also means that the caller guarantees
+        * that the pages already have backing store allocated.
+        */
+
+       if (flags & PGO_OVERWRITE) {
+               UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
+
+               /* XXX for now, zero the page if we allocated it */
+               for (i = 0; i < npages; i++) {
+                       struct vm_page *pg = pgs[ridx + i];
+                       if (pg->flags & PG_FAKE) {
+                               uvm_pagezero(pg);
+                               pg->flags &= ~PG_FAKE;
+                       }
+               }
+
+               simple_unlock(&uobj->vmobjlock);
+               goto out;
+       }
 
-       if (npages == 1) {
-               pgs[0] = pg;
-       } else {
-               int n = npages;
-               memset(pgs, 0, sizeof(pgs));
-               pgs[cidx] = PGO_DONTCARE;
-               uvn_findpages(uobj, offset, &n, pgs, 0);
-               pgs[cidx] = pg;
+       /*
+        * if the pages are already resident, just return them.
+        */
+
+       for (i = 0; i < npages; i++) {
+               struct vm_page *pg = pgs[ridx + i];
+
+               if ((pg->flags & PG_FAKE) != 0 ||
+                   ((ap->a_access_type & VM_PROT_WRITE) &&
+                     (pg->flags & PG_RDONLY))) {
+                       break;
+               }
+       }
+       if (i == npages) {
+               UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
+               simple_unlock(&uobj->vmobjlock);
+               goto out;
+       }
+
+       /*
+        * the page wasn't resident and we're not overwriting,
+        * so we're going to have to do some i/o.
+        * find any additional pages needed to cover the expanded range.
+        */
+
+       if (startoffset != origoffset) {
+               UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
+                           (int)startoffset, (int)endoffset, 0,0);
+               npages = (endoffset - startoffset) >> PAGE_SHIFT;
+               if (npages == 0) {
+                       panic("XXX getpages npages = 0");
+               }
+               npgs = npages;
+               uvn_findpages(uobj, startoffset, &npgs, pgs, UFP_ALL);
        }
        simple_unlock(&uobj->vmobjlock);
 
@@ -528,7 +547,8 @@
         */
 
        totalbytes = npages << PAGE_SHIFT;
-       bytes = min(totalbytes, (vp->v_uvm.u_size - offset + dev_bsize - 1) &
+       bytes = min(totalbytes,
+                   (vp->v_uvm.u_size - startoffset + dev_bsize - 1) &
                    ~(dev_bsize - 1));
        tailbytes = totalbytes - bytes;
        skipbytes = 0;
@@ -539,13 +559,26 @@
        mbp = pool_get(&bufpool, PR_WAITOK);
        splx(s);
        mbp->b_bufsize = bytes;
-       mbp->b_data = (void *)kva;
+       mbp->b_data = kva;
        mbp->b_resid = mbp->b_bcount = bytes;
        mbp->b_flags = B_BUSY|B_READ| (flags & PGO_SYNCIO ? 0 : B_CALL);
        mbp->b_iodone = uvm_aio_biodone;
        mbp->b_vp = vp;
 
+       /*
+        * if EOF is in the middle of the last page, zero the part past EOF.
+        */
+
+       if (tailbytes > 0) {
+               memset(kva + bytes, 0, tailbytes);
+       }
+
+       /*
+        * now loop over the pages, reading as needed.
+        */
+
        bp = NULL;
+       offset = startoffset;
        for (; bytes > 0; offset += iobytes, bytes -= iobytes) {
 
                /*
@@ -566,6 +599,8 @@
                        bytes -= b;
                        skipbytes += b;
                        pidx++;
+                       UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
+                                   (int)offset, 0,0,0);
                        if (bytes == 0) {
                                goto loopdone;
                        }
@@ -573,7 +608,8 @@
 
                /*
                 * bmap the file to find out the blkno to read from and
-                * how much we can read in one i/o.
+                * how much we can read in one i/o.  if bmap returns an error,
+                * skip the rest of the top-level i/o.
                 */
 
                lbn = offset >> bshift;
@@ -581,11 +617,13 @@
                if (error) {
                        UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
                                    lbn, error,0,0);
-                       goto looperr;
+                       skipbytes += bytes;
+                       tailbytes = 0;
+                       goto loopdone;
                }
 
                /*
-                * see how many pages need to be read with this i/o.
+                * see how many pages can be read with this i/o.
                 * reduce the i/o size if necessary.
                 */
 
@@ -602,15 +640,23 @@
 
                /*
                 * if this block isn't allocated, zero it instead of reading it.
+                * if this is a read access, mark the pages we zeroed PG_RDONLY.
                 */
 
                if (blkno == (daddr_t)-1) {
                        UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
 



Home | Main Index | Thread Index | Old Index