Source-Changes-HG archive


[src/trunk]: src Merge from yamt-pagecache (after much testing):



details:   https://anonhg.NetBSD.org/src/rev/c0624a2b89a1
branches:  trunk
changeset: 848045:c0624a2b89a1
user:      ad <ad@NetBSD.org>
date:      Wed Jan 15 17:55:43 2020 +0000

description:
Merge from yamt-pagecache (after much testing):

- Reduce unnecessary page scanning in putpages, especially when an object has
  a large number of pages cached but only a few of them are dirty.

- Reduce the number of pmap operations by tracking page dirtiness more
  precisely in the uvm layer (see the sketch below).
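
The new dirtiness interface shows up throughout the diff below as
uvm_pagegetdirty(), uvm_pagemarkdirty() and the UVM_PAGE_STATUS_{CLEAN,
UNKNOWN,DIRTY} constants (implemented in the new sys/uvm/uvm_page_status.c).
Below is a minimal caller-side sketch of how a tri-state status lets most
pages be classified without touching the pmap; the helper name, the locking
detail and the exact UNKNOWN semantics are assumptions for illustration, not
code from this commit.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <uvm/uvm.h>

	/*
	 * Illustrative helper (not from this commit): decide whether a
	 * page needs to be written back.  Only pages whose status is
	 * UNKNOWN require a pmap query; CLEAN and DIRTY pages are
	 * classified from the uvm-layer status alone.  The owning
	 * object's lock is assumed to be held by the caller.
	 */
	static bool
	page_needs_writeback(struct vm_page *pg)
	{
		unsigned int status = uvm_pagegetdirty(pg);

		if (status == UVM_PAGE_STATUS_DIRTY)
			return true;
		if (status == UVM_PAGE_STATUS_CLEAN)
			return false;

		/* UNKNOWN: the page may have been dirtied via a mapping. */
		if (pmap_is_modified(pg)) {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
			return true;
		}
		return false;
	}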

diffstat:

 external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c |   10 +-
 sys/external/bsd/drm2/dist/drm/drm_gem.c               |   10 +-
 sys/external/bsd/drm2/dist/drm/i915/i915_gem.c         |    6 +-
 sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c   |    6 +-
 sys/external/bsd/drm2/include/linux/mm.h               |    4 +-
 sys/miscfs/genfs/genfs_io.c                            |  282 ++++++++++------
 sys/miscfs/genfs/genfs_node.h                          |    3 +-
 sys/nfs/nfs_bio.c                                      |    7 +-
 sys/rump/librump/rumpkern/Makefile.rumpkern            |    4 +-
 sys/rump/librump/rumpkern/vm.c                         |    6 +-
 sys/rump/librump/rumpvfs/vm_vfs.c                      |   12 +-
 sys/sys/cpu_data.h                                     |   12 +-
 sys/ufs/lfs/lfs_pages.c                                |   15 +-
 sys/ufs/lfs/lfs_segment.c                              |    7 +-
 sys/ufs/lfs/lfs_vfsops.c                               |    7 +-
 sys/ufs/lfs/ulfs_inode.c                               |   20 +-
 sys/ufs/ufs/ufs_inode.c                                |    8 +-
 sys/uvm/files.uvm                                      |    3 +-
 sys/uvm/uvm_anon.c                                     |    6 +-
 sys/uvm/uvm_aobj.c                                     |   28 +-
 sys/uvm/uvm_bio.c                                      |   35 +-
 sys/uvm/uvm_extern.h                                   |   12 +-
 sys/uvm/uvm_fault.c                                    |   67 ++-
 sys/uvm/uvm_loan.c                                     |   35 +-
 sys/uvm/uvm_meter.c                                    |   10 +-
 sys/uvm/uvm_object.c                                   |    7 +-
 sys/uvm/uvm_object.h                                   |    9 +-
 sys/uvm/uvm_page.c                                     |  158 +++++++--
 sys/uvm/uvm_page.h                                     |  105 ++++--
 sys/uvm/uvm_page_array.c                               |    9 +-
 sys/uvm/uvm_page_status.c                              |  194 +++++++++++
 sys/uvm/uvm_pager.c                                    |   18 +-
 sys/uvm/uvm_pdaemon.c                                  |   18 +-
 sys/uvm/uvm_vnode.c                                    |  111 +++++-
 34 files changed, 886 insertions(+), 358 deletions(-)
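
A mechanical change visible in most hunks below is an extra argument to
uvn_findpages(); every caller touched by this commit passes NULL for it.
The following sketch, modelled on the zfs_vnops.c hunks, shows the call
form after the change.  The helper name is made up, and the guess that the
new parameter is an optional lookup-state pointer (suggested by the
uvm_page_array.c entry in the diffstat above) is an assumption, not
something stated in this commit.

	/*
	 * Illustrative helper (not from this commit): find, without
	 * allocating, the single resident page at "offset" in "uobj".
	 * On success the page is returned busy, as uvn_findpages()
	 * leaves it, and the caller must eventually unbusy it.  Passing
	 * NULL lets uvn_findpages() manage its own lookup state, which
	 * is what all callers below do.
	 */
	static struct vm_page *
	find_resident_page(struct uvm_object *uobj, voff_t offset)
	{
		struct vm_page *pg = NULL;
		unsigned int npages = 1;

		mutex_enter(uobj->vmobjlock);
		if (uvn_findpages(uobj, offset, &npages, &pg, NULL,
		    UFP_NOALLOC) == 0)
			pg = NULL;
		mutex_exit(uobj->vmobjlock);
		return pg;
	}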

diffs (truncated from 2739 to 300 lines):

diff -r cc68f749bd4b -r c0624a2b89a1 external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c
--- a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c    Wed Jan 15 15:32:05 2020 +0000
+++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c    Wed Jan 15 17:55:43 2020 +0000
@@ -746,7 +746,8 @@
                pp = NULL;
                npages = 1;
                mutex_enter(mtx);
-               found = uvn_findpages(uobj, start, &npages, &pp, UFP_NOALLOC);
+               found = uvn_findpages(uobj, start, &npages, &pp, NULL,
+                   UFP_NOALLOC);
                mutex_exit(mtx);
 
                /* XXXNETBSD shouldn't access userspace with the page busy */
@@ -792,7 +793,8 @@
 
                pp = NULL;
                npages = 1;
-               found = uvn_findpages(uobj, start, &npages, &pp, UFP_NOALLOC);
+               found = uvn_findpages(uobj, start, &npages, &pp, NULL,
+                   UFP_NOALLOC);
                if (found) {
                        mutex_exit(mtx);
 
@@ -5976,7 +5978,7 @@
        }
        npages = 1;
        pg = NULL;
-       uvn_findpages(uobj, offset, &npages, &pg, UFP_ALL);
+       uvn_findpages(uobj, offset, &npages, &pg, NULL, UFP_ALL);
 
        if (pg->flags & PG_FAKE) {
                mutex_exit(mtx);
@@ -6224,7 +6226,7 @@
        mutex_enter(mtx);
        count = 1;
        pg = NULL;
-       if (uvn_findpages(uobj, tsize, &count, &pg, UFP_NOALLOC)) {
+       if (uvn_findpages(uobj, tsize, &count, &pg, NULL, UFP_NOALLOC)) {
                va = zfs_map_page(pg, S_WRITE);
                pgoff = size - tsize;
                memset(va + pgoff, 0, PAGESIZE - pgoff);
diff -r cc68f749bd4b -r c0624a2b89a1 sys/external/bsd/drm2/dist/drm/drm_gem.c
--- a/sys/external/bsd/drm2/dist/drm/drm_gem.c  Wed Jan 15 15:32:05 2020 +0000
+++ b/sys/external/bsd/drm2/dist/drm/drm_gem.c  Wed Jan 15 17:55:43 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: drm_gem.c,v 1.10 2018/08/27 15:22:53 riastradh Exp $   */
+/*     $NetBSD: drm_gem.c,v 1.11 2020/01/15 17:55:43 ad Exp $  */
 
 /*
  * Copyright © 2008 Intel Corporation
@@ -28,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.10 2018/08/27 15:22:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.11 2020/01/15 17:55:43 ad Exp $");
 
 #include <linux/types.h>
 #include <linux/slab.h>
@@ -612,8 +612,10 @@
        unsigned i;
 
        for (i = 0; i < (obj->size >> PAGE_SHIFT); i++) {
-               if (dirty)
-                       pages[i]->p_vmp.flags &= ~PG_CLEAN;
+               if (dirty) {
+                       uvm_pagemarkdirty(&pages[i]->p_vmp,
+                           UVM_PAGE_STATUS_DIRTY);
+               }
        }
 
        uvm_obj_unwirepages(obj->filp, 0, obj->size);
diff -r cc68f749bd4b -r c0624a2b89a1 sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
--- a/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c    Wed Jan 15 15:32:05 2020 +0000
+++ b/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c    Wed Jan 15 17:55:43 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: i915_gem.c,v 1.54 2018/08/27 15:22:54 riastradh Exp $  */
+/*     $NetBSD: i915_gem.c,v 1.55 2020/01/15 17:55:43 ad Exp $ */
 
 /*
  * Copyright © 2008-2015 Intel Corporation
@@ -28,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.54 2018/08/27 15:22:54 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.55 2020/01/15 17:55:43 ad Exp $");
 
 #ifdef __NetBSD__
 #if 0                          /* XXX uvmhist option?  */
@@ -2644,7 +2644,7 @@
 
        if (obj->dirty) {
                TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
-                       page->flags &= ~PG_CLEAN;
+                       uvm_pagemarkdirty(page, UVM_PAGE_STATUS_DIRTY);
                        /* XXX mark page accessed */
                }
        }
diff -r cc68f749bd4b -r c0624a2b89a1 sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c
--- a/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c      Wed Jan 15 15:32:05 2020 +0000
+++ b/sys/external/bsd/drm2/dist/drm/i915/i915_gem_fence.c      Wed Jan 15 17:55:43 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: i915_gem_fence.c,v 1.5 2018/08/27 15:09:35 riastradh Exp $     */
+/*     $NetBSD: i915_gem_fence.c,v 1.6 2020/01/15 17:55:43 ad Exp $    */
 
 /*
  * Copyright © 2008-2015 Intel Corporation
@@ -24,7 +24,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_fence.c,v 1.5 2018/08/27 15:09:35 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_fence.c,v 1.6 2020/01/15 17:55:43 ad Exp $");
 
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
@@ -769,7 +769,7 @@
                    (test_bit(i, obj->bit_17) != 0)) {
                        i915_gem_swizzle_page(container_of(page, struct page,
                                p_vmp));
-                       page->flags &= ~PG_CLEAN;
+                       uvm_pagemarkdirty(page, UVM_PAGE_STATUS_DIRTY);
                }
                i += 1;
        }
diff -r cc68f749bd4b -r c0624a2b89a1 sys/external/bsd/drm2/include/linux/mm.h
--- a/sys/external/bsd/drm2/include/linux/mm.h  Wed Jan 15 15:32:05 2020 +0000
+++ b/sys/external/bsd/drm2/include/linux/mm.h  Wed Jan 15 17:55:43 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: mm.h,v 1.9 2018/08/27 13:44:54 riastradh Exp $ */
+/*     $NetBSD: mm.h,v 1.10 2020/01/15 17:55:44 ad Exp $       */
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -96,7 +96,7 @@
 set_page_dirty(struct page *page)
 {
 
-       page->p_vmp.flags &= ~PG_CLEAN;
+       uvm_pagemarkdirty(&page->p_vmp, UVM_PAGE_STATUS_DIRTY);
 }
 
 #endif  /* _LINUX_MM_H_ */
diff -r cc68f749bd4b -r c0624a2b89a1 sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c       Wed Jan 15 15:32:05 2020 +0000
+++ b/sys/miscfs/genfs/genfs_io.c       Wed Jan 15 17:55:43 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs_io.c,v 1.83 2019/12/31 22:42:50 ad Exp $ */
+/*     $NetBSD: genfs_io.c,v 1.84 2020/01/15 17:55:44 ad Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.83 2019/12/31 22:42:50 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.84 2020/01/15 17:55:44 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -86,10 +86,8 @@
 static void
 genfs_markdirty(struct vnode *vp)
 {
-       struct genfs_node * const gp = VTOG(vp);
 
        KASSERT(mutex_owned(vp->v_interlock));
-       gp->g_dirtygen++;
        if ((vp->v_iflag & VI_ONWORKLST) == 0) {
                vn_syncer_add_to_worklist(vp, filedelay);
        }
@@ -137,6 +135,7 @@
        UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx/%jx count %jd",
            (uintptr_t)vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
 
+       KASSERT(memwrite >= overwrite);
        KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
            vp->v_type == VLNK || vp->v_type == VBLK);
 
@@ -231,12 +230,17 @@
                }
 #endif /* defined(DEBUG) */
                nfound = uvn_findpages(uobj, origoffset, &npages,
-                   ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
+                   ap->a_m, NULL,
+                   UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
                KASSERT(npages == *ap->a_count);
                if (nfound == 0) {
                        error = EBUSY;
                        goto out_err;
                }
+               /*
+                * lock and unlock g_glock to ensure that no one is truncating
+                * the file behind us.
+                */
                if (!genfs_node_rdtrylock(vp)) {
                        genfs_rel_pages(ap->a_m, npages);
 
@@ -258,6 +262,17 @@
                }
                error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
                if (error == 0 && memwrite) {
+                       for (i = 0; i < npages; i++) {
+                               pg = ap->a_m[i];
+                               if (pg == NULL || pg == PGO_DONTCARE) {
+                                       continue;
+                               }
+                               if (uvm_pagegetdirty(pg) ==
+                                   UVM_PAGE_STATUS_CLEAN) {
+                                       uvm_pagemarkdirty(pg,
+                                           UVM_PAGE_STATUS_UNKNOWN);
+                               }
+                       }
                        genfs_markdirty(vp);
                }
                goto out_err;
@@ -351,7 +366,7 @@
                goto startover;
        }
 
-       if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
+       if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
            async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
                if (!glocked) {
                        genfs_node_unlock(vp);
@@ -364,27 +379,6 @@
        }
 
        /*
-        * if the pages are already resident, just return them.
-        */
-
-       for (i = 0; i < npages; i++) {
-               struct vm_page *pg = pgs[ridx + i];
-
-               if ((pg->flags & PG_FAKE) ||
-                   (blockalloc && (pg->flags & PG_RDONLY))) {
-                       break;
-               }
-       }
-       if (i == npages) {
-               if (!glocked) {
-                       genfs_node_unlock(vp);
-               }
-               UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
-               npages += ridx;
-               goto out;
-       }
-
-       /*
         * if PGO_OVERWRITE is set, don't bother reading the pages.
         */
 
@@ -397,13 +391,50 @@
                for (i = 0; i < npages; i++) {
                        struct vm_page *pg = pgs[ridx + i];
 
-                       pg->flags &= ~(PG_RDONLY|PG_CLEAN);
+                       /*
+                        * it's caller's responsibility to allocate blocks
+                        * beforehand for the overwrite case.
+                        */
+
+                       KASSERT((pg->flags & PG_RDONLY) == 0 || !blockalloc);
+                       pg->flags &= ~PG_RDONLY;
+
+                       /*
+                        * mark the page DIRTY.
+                        * otherwise another thread can do putpages and pull
+                        * our vnode from syncer's queue before our caller does
+                        * ubc_release.  note that putpages won't see CLEAN
+                        * pages even if they are BUSY.
+                        */
+
+                       uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
                }
                npages += ridx;
                goto out;
        }
 
        /*
+        * if the pages are already resident, just return them.
+        */
+
+       for (i = 0; i < npages; i++) {
+               struct vm_page *pg = pgs[ridx + i];
+
+               if ((pg->flags & PG_FAKE) ||
+                   (blockalloc && (pg->flags & PG_RDONLY) != 0)) {
+                       break;
+               }
+       }
+       if (i == npages) {
+               if (!glocked) {
+                       genfs_node_unlock(vp);
+               }
+               UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
+               npages += ridx;


