Source-Changes-HG archive

[src/uebayasi-xip]: src/sys/miscfs/genfs Share mode code. Care glock.



details:   https://anonhg.NetBSD.org/src/rev/dfb41cb4e595
branches:  uebayasi-xip
changeset: 751874:dfb41cb4e595
user:      uebayasi <uebayasi%NetBSD.org@localhost>
date:      Fri Nov 19 07:09:49 2010 +0000

description:
Share mode code.  Care glock.

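In plainer terms, the change appears to do two things.  First, the XIP
(execute-in-place) case no longer returns early from the generic getpages
routine; it now shares the common setup and page-array handling, skips only
the normal page-cache lookup (the new find_pagecache_done label), and hands
off to genfs_do_getpages_xip_io() once the object lock has been dropped.
Second, genfs_do_getpages_xip_io() now takes care of the genfs node lock
(the "glock"): it asserts the lock is held on entry and releases it before
returning, unless the caller passes PGO_GLOCKHELD to say it keeps ownership.
A small stand-alone sketch of that lock convention follows after the diff.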
diffstat:

 sys/miscfs/genfs/genfs_io.c |  51 ++++++++++++++++++++++++++++++---------------
 1 files changed, 34 insertions(+), 17 deletions(-)

diffs (116 lines):

diff -r b9d11d6abcf7 -r dfb41cb4e595 sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c       Fri Nov 19 06:38:53 2010 +0000
+++ b/sys/miscfs/genfs/genfs_io.c       Fri Nov 19 07:09:49 2010 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs_io.c,v 1.36.2.42 2010/11/19 06:38:53 uebayasi Exp $      */
+/*     $NetBSD: genfs_io.c,v 1.36.2.43 2010/11/19 07:09:49 uebayasi Exp $      */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.42 2010/11/19 06:38:53 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.43 2010/11/19 07:09:49 uebayasi Exp $");
 
 #include "opt_xip.h"
 
@@ -311,20 +311,6 @@
            round_page(memeof));
        const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
 
-#if 1
-       if ((ap->a_vp->v_vflag & VV_XIP) != 0)
-               return genfs_do_getpages_xip_io(
-                       ap->a_vp,
-                       ap->a_offset,
-                       ap->a_m,
-                       ap->a_count,
-                       ap->a_centeridx,
-                       ap->a_access_type,
-                       ap->a_advice,
-                       ap->a_flags,
-                       orignmempages);
-#endif
-
        const int pgs_size = sizeof(struct vm_page *) *
            ((endoffset - startoffset) >> PAGE_SHIFT);
        struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
@@ -373,6 +359,11 @@
                goto startover;
        }
 
+#if 1
+       if ((ap->a_vp->v_vflag & VV_XIP) != 0)
+               goto find_pagecache_done;
+#endif
+
        if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
            async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
                if (!glocked) {
@@ -478,8 +469,28 @@
                }
        }
 
+#if 1
+find_pagecache_done:
+#endif
+
        mutex_exit(&uobj->vmobjlock);
 
+#if 1
+       if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
+               error = genfs_do_getpages_xip_io(
+                       ap->a_vp,
+                       ap->a_offset,
+                       ap->a_m,
+                       ap->a_count,
+                       ap->a_centeridx,
+                       ap->a_access_type,
+                       ap->a_advice,
+                       ap->a_flags,
+                       orignmempages);
+               goto out_err_free;
+       }
+#endif
+
     {
        size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
        vaddr_t kva;
@@ -831,6 +842,7 @@
        const int orignmempages)
 {
        struct uvm_object * const uobj = &vp->v_uobj;
+       const bool glocked = (flags & PGO_GLOCKHELD) != 0;
 
        const int fs_bshift = vp2fs_bshift(vp);
        const int dev_bshift = vp2dev_bshift(vp);
@@ -860,6 +872,8 @@
 
        UVMHIST_FUNC("genfs_do_getpages_xip_io"); UVMHIST_CALLED(ubchist);
 
+       KASSERT(glocked || genfs_node_rdlocked(vp));
+
 #if 0
        GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_MEM);
        orignmempages = MIN(orignpages, round_page(memeof - origoffset) >> PAGE_SHIFT);
@@ -938,6 +952,9 @@
 
        mutex_exit(&uobj->vmobjlock);
 
+       if (!glocked)
+               genfs_node_unlock(vp);
+
        *npagesp = orignmempages;
 
        return 0;
@@ -1535,7 +1552,7 @@
                KASSERT(mutex_owned(&uobj->vmobjlock));
                mutex_exit(&uobj->vmobjlock);
                error = genfs_do_getpages_xip_io(vp, off, pgs, &npages, 0,
-                   VM_PROT_ALL, 0, 0, orignpages);
+                   VM_PROT_ALL, 0, PGO_GLOCKHELD, orignpages);
                KASSERT(error == 0);
                KASSERT(npages == orignpages);
                mutex_enter(&uobj->vmobjlock);

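For readers unfamiliar with the convention above, the following stand-alone
C sketch models the lock-ownership contract that the new PGO_GLOCKHELD
handling seems to express.  It is illustrative only, not kernel code:
GETPAGES_GLOCKHELD, struct node and xip_getpages() are hypothetical
stand-ins for PGO_GLOCKHELD, the genfs node and genfs_do_getpages_xip_io().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define GETPAGES_GLOCKHELD	0x01	/* stand-in for PGO_GLOCKHELD */

struct node {
        pthread_rwlock_t lock;	/* stand-in for the genfs node lock ("glock") */
};

/*
 * Stand-in for genfs_do_getpages_xip_io(): must be entered with the node
 * lock held.  In the diff this precondition is the new
 * KASSERT(glocked || genfs_node_rdlocked(vp)).
 */
static int
xip_getpages(struct node *n, int flags)
{
        const bool glocked = (flags & GETPAGES_GLOCKHELD) != 0;

        /* ... translate the request into XIP device pages here ... */
        printf("xip_getpages: caller keeps the lock: %s\n",
            glocked ? "yes" : "no");

        /* Drop the lock only when the caller did not claim ownership. */
        if (!glocked)
                pthread_rwlock_unlock(&n->lock);
        return 0;
}

int
main(void)
{
        struct node n;

        pthread_rwlock_init(&n.lock, NULL);

        /* Fault-style path: lock taken here, released inside xip_getpages(). */
        pthread_rwlock_rdlock(&n.lock);
        xip_getpages(&n, 0);

        /* Putpages-style path: caller keeps ownership (PGO_GLOCKHELD). */
        pthread_rwlock_rdlock(&n.lock);
        xip_getpages(&n, GETPAGES_GLOCKHELD);
        pthread_rwlock_unlock(&n.lock);

        pthread_rwlock_destroy(&n.lock);
        return 0;
}

The two calls in main() are meant to mirror the two call sites in the diff:
the fault path, where the generic getpages code took the lock and the XIP
helper releases it, and the putpages-style caller, which now passes
PGO_GLOCKHELD because it keeps the lock across the call.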

