Source-Changes-HG archive


[src/trunk]: src/sys/uvm Drop & re-acquire vmobjlock less often.



details:   https://anonhg.NetBSD.org/src/rev/cd6f2af882ef
branches:  trunk
changeset: 933151:cd6f2af882ef
user:      ad <ad%NetBSD.org@localhost>
date:      Tue May 19 21:45:35 2020 +0000

description:
Drop & re-acquire vmobjlock less often.
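
A minimal standalone sketch of the pattern (hypothetical ra_work_locked()/
ra_caller() names, not code from this commit): the callee now asserts that
the caller already holds vmobjlock for writing instead of taking and
dropping the lock itself, so a single rw_enter()/rw_exit() pair covers the
whole operation.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/rwlock.h>

	#include <uvm/uvm.h>

	/*
	 * Hypothetical callee: asserts that the caller holds
	 * uobj->vmobjlock for writing rather than acquiring and
	 * releasing the lock internally.
	 */
	static void
	ra_work_locked(struct uvm_object *uobj)
	{

		KASSERT(rw_write_held(uobj->vmobjlock));
		/* ... look up or request pages on uobj ... */
	}

	/*
	 * Hypothetical caller: takes the lock once around the whole
	 * operation instead of the callee dropping and re-acquiring it.
	 */
	static void
	ra_caller(struct uvm_object *uobj)
	{

		rw_enter(uobj->vmobjlock, RW_WRITER);
		ra_work_locked(uobj);
		rw_exit(uobj->vmobjlock);
	}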

diffstat:

 sys/uvm/uvm_readahead.c |  14 +++++++-------
 1 files changed, 7 insertions(+), 7 deletions(-)

diffs (67 lines):

diff -r 73527ac38ad2 -r cd6f2af882ef sys/uvm/uvm_readahead.c
--- a/sys/uvm/uvm_readahead.c   Tue May 19 21:43:36 2020 +0000
+++ b/sys/uvm/uvm_readahead.c   Tue May 19 21:45:35 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_readahead.c,v 1.12 2020/03/08 18:40:29 ad Exp $    */
+/*     $NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $    */
 
 /*-
  * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
@@ -40,7 +40,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.12 2020/03/08 18:40:29 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/pool.h>
@@ -126,6 +126,8 @@
        DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
            __func__, uobj, off, endoff));
 
+       KASSERT(rw_write_held(uobj->vmobjlock));
+
        /*
         * Don't issue read-ahead if the last page of the range is already cached.
         * The assumption is that since the access is sequential, the intermediate
@@ -133,9 +135,7 @@
         * too. This speeds up I/O using cache, since it avoids lookups and temporary
         * allocations done by full pgo_get.
         */
-       rw_enter(uobj->vmobjlock, RW_READER);
        struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1));
-       rw_exit(uobj->vmobjlock);
        if (pg != NULL) {
                DPRINTF(("%s:  off=%" PRIu64 ", sz=%zu already cached\n",
                    __func__, off, sz));
@@ -162,9 +162,9 @@
                 * use UVM_ADV_RANDOM to avoid recursion.
                 */
 
-               rw_enter(uobj->vmobjlock, RW_WRITER);
                error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
                    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, PGO_NOTIMESTAMP);
+               rw_enter(uobj->vmobjlock, RW_WRITER);
                DPRINTF(("%s:  off=%" PRIu64 ", bytelen=%zu -> %d\n",
                    __func__, off, bytelen, error));
                if (error != 0 && error != EBUSY) {
@@ -332,9 +332,7 @@
                if (rasize >= RA_MINSIZE) {
                        off_t next;
 
-                       rw_exit(uobj->vmobjlock);
                        next = ra_startio(uobj, raoff, rasize);
-                       rw_enter(uobj->vmobjlock, RW_WRITER);
                        ra->ra_next = next;
                }
        }
@@ -362,6 +360,8 @@
        if (size > RA_WINSIZE_MAX) {
                size = RA_WINSIZE_MAX;
        }
+       rw_enter(uobj->vmobjlock, RW_WRITER);
        ra_startio(uobj, off, size);
+       rw_exit(uobj->vmobjlock);
        return 0;
 }


