Source-Changes-HG archive


[src/chs-ubc2]: src/sys/miscfs/genfs create genfs_getpages() and genfs_putpag...



details:   https://anonhg.NetBSD.org/src/rev/85dc8484d4b3
branches:  chs-ubc2
changeset: 471363:85dc8484d4b3
user:      chs <chs%NetBSD.org@localhost>
date:      Sun Jul 04 01:44:43 1999 +0000

description:
create genfs_getpages() and genfs_putpages().
these should be able to handle most of the local-disk filesystems.
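
For reference, a hedged sketch (not part of this change) of how a local-disk
filesystem would typically hook these routines into its vnode operations
table; the "xxx_" names are placeholders, and the vop_getpages_desc /
vop_putpages_desc descriptors are assumed to be the standard ones generated
from vnode_if.src:

    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <miscfs/genfs/genfs.h>

    int (**xxx_vnodeop_p) __P((void *));

    struct vnodeopv_entry_desc xxx_vnodeop_entries[] = {
            { &vop_default_desc, vn_default_error },
            /* ... the filesystem's other vnode operations ... */
            { &vop_getpages_desc, genfs_getpages },         /* getpages */
            { &vop_putpages_desc, genfs_putpages },         /* putpages */
            { (struct vnodeop_desc *)NULL, (int (*) __P((void *)))NULL }
    };

    struct vnodeopv_desc xxx_vnodeop_opv_desc =
            { &xxx_vnodeop_p, xxx_vnodeop_entries };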

diffstat:

 sys/miscfs/genfs/genfs.h       |    5 +-
 sys/miscfs/genfs/genfs_vnops.c |  436 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 439 insertions(+), 2 deletions(-)

diffs (truncated from 470 to 300 lines):

diff -r c62614e974e1 -r 85dc8484d4b3 sys/miscfs/genfs/genfs.h
--- a/sys/miscfs/genfs/genfs.h  Sun Jul 04 01:42:26 1999 +0000
+++ b/sys/miscfs/genfs/genfs.h  Sun Jul 04 01:44:43 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs.h,v 1.8 1998/08/13 09:59:52 kleink Exp $ */
+/*     $NetBSD: genfs.h,v 1.8.8.1 1999/07/04 01:44:43 chs Exp $        */
 
 int    genfs_badop     __P((void *));
 int    genfs_nullop    __P((void *));
@@ -16,3 +16,6 @@
 int    genfs_abortop   __P((void *));
 int    genfs_revoke    __P((void *));
 int    genfs_lease_check __P((void *));
+
+int    genfs_getpages __P((void *));
+int    genfs_putpages __P((void *));
diff -r c62614e974e1 -r 85dc8484d4b3 sys/miscfs/genfs/genfs_vnops.c
--- a/sys/miscfs/genfs/genfs_vnops.c    Sun Jul 04 01:42:26 1999 +0000
+++ b/sys/miscfs/genfs/genfs_vnops.c    Sun Jul 04 01:44:43 1999 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs_vnops.c,v 1.11 1999/03/05 21:09:49 mycroft Exp $ */
+/*     $NetBSD: genfs_vnops.c,v 1.11.4.1 1999/07/04 01:44:43 chs Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -49,6 +49,9 @@
 #include <miscfs/genfs/genfs.h>
 #include <miscfs/specfs/specdev.h>
 
+#include <vm/vm.h>
+#include <uvm/uvm.h>
+
 #ifdef NFSSERVER
 #include <nfs/rpcv2.h>
 #include <nfs/nfsproto.h>
@@ -377,3 +380,434 @@
        return (0);
 #endif /* NFSSERVER */
 }
+
+
+/*
+ * generic VM getpages routine.
+ * Return PG_BUSY pages for the given range,
+ * reading from backing store if necessary.
+ */
+int
+genfs_getpages(v)
+       void *v;
+{
+       struct vop_getpages_args /* {
+               struct vnode *a_vp;
+               vaddr_t a_offset;
+               vm_page_t *a_m;
+               int *a_count;
+               int a_centeridx;
+               vm_prot_t a_access_type;
+               int a_advice;
+               int a_flags;
+       } */ *ap = v;
+
+       off_t offset, origoffset;
+       daddr_t lbn, blkno;
+       int s, i, error, npages, cidx, bsize, bshift, run;
+       int dev_bshift, dev_bsize;
+       int flags = ap->a_flags;
+       size_t bytes, iobytes, tailbytes;
+       char *kva;
+       struct buf *bp, *mbp;
+       struct vnode *vp = ap->a_vp;
+       struct uvm_object *uobj = &vp->v_uvm.u_obj;
+       struct vm_page *pg, *pgs[16];  /* XXX 16 */
+       struct ucred *cred = curproc->p_ucred;
+       UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
+
+#ifdef DIAGNOSTIC
+       if (ap->a_centeridx < 0 || ap->a_centeridx > *ap->a_count) {
+               panic("genfs_getpages: centeridx %d out of range",
+                     ap->a_centeridx);
+       }
+       if (ap->a_offset & (PAGE_SIZE - 1)) {
+               panic("genfs_getpages: offset 0x%x", (int)ap->a_offset);
+       }
+#endif
+
+       /*
+        * Bounds-check the request.
+        */
+
+       if (ap->a_offset >= vp->v_uvm.u_size) {
+               if ((flags & PGO_LOCKED) == 0) {
+                       simple_unlock(&uobj->vmobjlock);
+               }
+               return VM_PAGER_BAD;
+       }
+
+       /*
+        * For PGO_LOCKED requests, just return whatever's in memory.
+        */
+
+       if (flags & PGO_LOCKED) {
+               uvn_findpages(uobj, ap->a_offset, ap->a_count, ap->a_m,
+                             UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);
+
+               return ap->a_m[ap->a_centeridx] == NULL ?
+                       VM_PAGER_UNLOCK : VM_PAGER_OK;
+       }
+
+       /* vnode is VOP_LOCKed, uobj is locked */
+
+       error = 0;
+
+       /*
+        * XXX do the findpages for our 1 page first,
+        * change asyncget to take the one page as an arg and
+        * pretend that its findpages found it.
+        */
+
+       /*
+        * kick off a big read first to get some readahead, then
+        * get the one page we wanted.
+        */
+
+       if ((flags & PGO_OVERWRITE) == 0 &&
+           (ap->a_offset & (MAXBSIZE - 1)) == 0) {
+               /*
+                * XXX pretty sure unlocking here is wrong.
+                */
+               simple_unlock(&uobj->vmobjlock);
+               uvm_vnp_asyncget(vp, ap->a_offset, MAXBSIZE);
+               simple_lock(&uobj->vmobjlock);
+       }
+
+       /*
+        * find the page we want.
+        */
+
+       origoffset = offset = ap->a_offset + (ap->a_centeridx << PAGE_SHIFT);
+       npages = 1;
+       pg = NULL;
+       uvn_findpages(uobj, offset, &npages, &pg, 0);
+       simple_unlock(&uobj->vmobjlock);
+
+       /*
+        * if the page is already resident, just return it.
+        */
+
+       if ((pg->flags & PG_FAKE) == 0 &&
+           !((ap->a_access_type & VM_PROT_WRITE) && (pg->flags & PG_RDONLY))) {
+               ap->a_m[ap->a_centeridx] = pg;
+               return VM_PAGER_OK;
+       }
+       UVMHIST_LOG(ubchist, "pg %p flags 0x%x access_type 0x%x",
+                   pg, (int)pg->flags, (int)ap->a_access_type, 0);
+
+       /*
+        * don't bother reading the page if we're just going to
+        * overwrite it.
+        */
+
+       if (flags & PGO_OVERWRITE) {
+               UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
+
+               /* XXX for now, zero the page */
+               if (pg->flags & PG_FAKE) {
+                       uvm_pagezero(pg);
+               }
+
+               goto out;
+       }
+
+       /*
+        * ok, really read the desired page.
+        */
+
+       bshift = vp->v_mount->mnt_fs_bshift;
+       bsize = 1 << bshift;
+       dev_bshift = vp->v_mount->mnt_dev_bshift;
+       dev_bsize = 1 << dev_bshift;
+       bytes = min(*ap->a_count << PAGE_SHIFT,
+                   (vp->v_uvm.u_size - offset + dev_bsize - 1) &
+                   ~(dev_bsize - 1));
+       tailbytes = (*ap->a_count << PAGE_SHIFT) - bytes;
+
+       kva = (void *)uvm_pagermapin(&pg, 1, M_WAITOK);
+
+       s = splbio();
+       mbp = pool_get(&bufpool, PR_WAITOK);
+       splx(s);
+       mbp->b_bufsize = bytes;
+       mbp->b_data = (void *)kva;
+       mbp->b_bcount = bytes;
+       mbp->b_flags = B_BUSY|B_READ| (flags & PGO_SYNCIO ? 0 : B_CALL);
+       mbp->b_iodone = uvm_aio_biodone;
+       mbp->b_vp = vp;
+
+       bp = NULL;
+       for (; bytes > 0; offset += iobytes, bytes -= iobytes) {
+               lbn = offset >> bshift;
+
+               /*
+                * bmap the file to find out the blkno to read from and
+                * how much we can read in one i/o.
+                */
+
+               error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
+               if (error) {
+                       UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
+                                   lbn, error,0,0);
+                       goto errout;
+               }
+               iobytes = min(((lbn + 1 + run) << bshift) - offset, bytes);
+
+               if (blkno == (daddr_t)-1) {
+                       UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
+
+                       /*
+                        * for read faults, we can skip the block allocation
+                        * by marking the page PG_RDONLY and PG_CLEAN.
+                        */
+
+                       if ((ap->a_access_type & VM_PROT_WRITE) == 0) {
+                               memset(kva + (offset - origoffset), 0,
+                                      min(1 << bshift, PAGE_SIZE -
+                                          (offset - origoffset)));
+
+                               pg->flags |= PG_CLEAN|PG_RDONLY;
+                               UVMHIST_LOG(ubchist, "setting PG_RDONLY",
+                                           0,0,0,0);
+                               continue;
+                       }
+
+                       /*
+                        * for write faults, we must allocate backing store
+                        * now and make sure the block is zeroed.
+                        */
+
+                       error = VOP_BALLOC(vp, offset, bsize, cred, NULL, 0);
+                       if (error) {
+                               UVMHIST_LOG(ubchist, "balloc lbn 0x%x -> %d",
+                                           lbn, error,0,0);
+                               goto errout;
+                       }
+
+                       error = VOP_BMAP(vp, lbn, NULL, &blkno, NULL);
+                       if (error) {
+                               UVMHIST_LOG(ubchist, "bmap2 lbn 0x%x -> %d",
+                                           lbn, error,0,0);
+                               goto errout;
+                       }
+
+                       simple_lock(&uobj->vmobjlock);
+                       npages = max(bsize >> PAGE_SHIFT, 1);
+                       if (npages > 1) {
+                               int idx = (offset - (lbn << bshift)) >> bshift;
+                               pgs[idx] = PGO_DONTCARE;
+                               uvn_findpages(uobj, offset &
+                                             ~((off_t)bsize - 1),
+                                             &npages, pgs, 0);
+                               pgs[idx] = pg;
+                               for (i = 0; i < npages; i++) {
+                                       uvm_pagezero(pgs[i]);
+                                       uvm_pageactivate(pgs[i]);
+
+                                       /*
+                                        * don't bother clearing mod/ref,
+                                        * the block is being modified anyways.
+                                        */
+
+                                       pgs[i]->flags &= ~(PG_FAKE|PG_RDONLY);
+                               }
+                       } else {
+                               memset(kva + (offset - origoffset), 0, bsize);
+                       }
+
+/* XXX is this cidx stuff right? */
+                       cidx = (offset >> PAGE_SHIFT) -
+                               (origoffset >> PAGE_SHIFT);
+                       pg = pgs[cidx];
+                       pgs[cidx] = NULL;
+                       uvm_pager_dropcluster(uobj, NULL, pgs, &npages, 0, 0);
+                       simple_unlock(&uobj->vmobjlock);
+                       UVMHIST_LOG(ubchist, "cleared pages",0,0,0,0);
+                       continue;
+               }
+
+               /*
+                * allocate a sub-buf for this piece of the i/o
+                * (or just use mbp if there's only 1 piece),
+                * and start it going.
+                */
+
+               if (bp == NULL && iobytes == bytes) {
+                       bp = mbp;
+               } else {
+                       s = splbio();
+                       bp = pool_get(&bufpool, PR_WAITOK);
+                       splx(s);
+                       bp->b_data = (void *)(kva + offset - pg->offset);
+                       bp->b_bcount = iobytes;


