tech-kern archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

Re: UVM/genfs review



On Wed, Jan 14, 2009 at 02:43:53PM -0600, Greg Oster wrote:
> So where are we at on this?  Does the deadlock that Juergen refers to 
> occur only with fss+wapbl or with fss+other as well?

The patch only changes the way the WAPBL journal is locked, so fss on
non-WAPBL doesn't change. I simply don't know how easy it is to hit that
deadlock. I do have issues with snapacct panicking during the suspend of
an active filesystem. Once the filesystem was suspended, I could tar up
the suspended filesystem while a bulk build was running.

To make sure we are all talking about the same patch, attached is the
version against -current. It slightly differs from the older netbsd-5
version due to the changes for directio locking.

Joerg
Index: genfs_io.c
===================================================================
RCS file: /home/joerg/repo/netbsd/src/sys/miscfs/genfs/genfs_io.c,v
retrieving revision 1.16
diff -u -p -r1.16 genfs_io.c
--- genfs_io.c  1 Dec 2008 11:22:12 -0000       1.16
+++ genfs_io.c  13 Jan 2009 17:05:33 -0000
@@ -775,7 +775,6 @@ genfs_do_putpages(struct vnode *vp, off_
        int flags;
        int dirtygen;
        bool modified;
-       bool need_wapbl;
        bool has_trans;
        bool cleanall;
        bool onworklst;
@@ -790,8 +789,6 @@ genfs_do_putpages(struct vnode *vp, off_
            vp, uobj->uo_npages, startoff, endoff - startoff);
 
        has_trans = false;
-       need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
-           (origflags & PGO_JOURNALLOCKED) == 0);
 
 retry:
        modified = false;
@@ -805,8 +802,6 @@ retry:
                                vn_syncer_remove_from_worklist(vp);
                }
                if (has_trans) {
-                       if (need_wapbl)
-                               WAPBL_END(vp->v_mount);
                        fstrans_done(vp->v_mount);
                }
                mutex_exit(slock);
@@ -825,13 +820,6 @@ retry:
                                return error;
                } else
                        fstrans_start(vp->v_mount, FSTRANS_LAZY);
-               if (need_wapbl) {
-                       error = WAPBL_BEGIN(vp->v_mount);
-                       if (error) {
-                               fstrans_done(vp->v_mount);
-                               return error;
-                       }
-               }
                has_trans = true;
                mutex_enter(slock);
                goto retry;
@@ -1206,8 +1194,6 @@ skip_scan:
        }
 
        if (has_trans) {
-               if (need_wapbl)
-                       WAPBL_END(vp->v_mount);
                fstrans_done(vp->v_mount);
        }
 
@@ -1279,6 +1265,8 @@ genfs_do_io(struct vnode *vp, off_t off,
        struct vnode *devvp;
        bool async = (flags & PGO_SYNCIO) == 0;
        bool write = rw == UIO_WRITE;
+       bool need_wapbl = (write && vp->v_mount && vp->v_mount->mnt_wapbl &&
+           (flags & PGO_JOURNALLOCKED) == 0);
        int brw = write ? B_WRITE : B_READ;
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
@@ -1300,6 +1288,12 @@ genfs_do_io(struct vnode *vp, off_t off,
        skipbytes = 0;
        KASSERT(bytes != 0);
 
+       if (need_wapbl) {
+               error = WAPBL_BEGIN(vp->v_mount);
+               if (error)
+                       return error;
+       }
+
        if (write) {
                mutex_enter(&vp->v_interlock);
                vp->v_numoutput += 2;
@@ -1376,14 +1370,19 @@ genfs_do_io(struct vnode *vp, off_t off,
        nestiobuf_done(mbp, skipbytes, error);
        if (async) {
                UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
-               return (0);
+               error = 0;
+       } else {
+               UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
+               error = biowait(mbp);
+               s = splbio();
+               (*iodone)(mbp);
+               splx(s);
+               UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
        }
-       UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
-       error = biowait(mbp);
-       s = splbio();
-       (*iodone)(mbp);
-       splx(s);
-       UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
+
+       if (need_wapbl)
+               WAPBL_END(vp->v_mount);
+
        return (error);
 }
 
@@ -1560,8 +1559,6 @@ genfs_directio(struct vnode *vp, struct 
        size_t len;
        const int mask = DEV_BSIZE - 1;
        int error;
-       bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
-           (ioflag & IO_JOURNALLOCKED) == 0);
 
        /*
         * We only support direct I/O to user space for now.
@@ -1583,12 +1580,6 @@ genfs_directio(struct vnode *vp, struct 
                return;
        }
 
-       if (need_wapbl) {
-               error = WAPBL_BEGIN(vp->v_mount);
-               if (error)
-                       return;
-       }
-
        /*
         * Do as much of the uio as possible with direct I/O.
         */
@@ -1634,9 +1625,6 @@ genfs_directio(struct vnode *vp, struct 
                uio->uio_offset += len;
                uio->uio_resid -= len;
        }
-
-       if (need_wapbl)
-               WAPBL_END(vp->v_mount);
 }
 
 /*
@@ -1743,7 +1731,8 @@ genfs_do_directio(struct vmspace *vs, va
         */
 
        koff = uva - trunc_page(uva);
-       error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
+       error = genfs_do_io(vp, off, kva + koff, len,
+                           PGO_SYNCIO | PGO_JOURNALLOCKED, rw,
                            genfs_dio_iodone);
 
        /*


Home | Main Index | Thread Index | Old Index