Source-Changes-HG archive

[src/trunk]: src/sys Rename uvm_page_locked_p() -> uvm_page_owner_locked_p()



details:   https://anonhg.NetBSD.org/src/rev/6b1b953cddca
branches:  trunk
changeset: 466697:6b1b953cddca
user:      ad <ad@NetBSD.org>
date:      Tue Dec 31 12:40:27 2019 +0000

description:
Rename uvm_page_locked_p() -> uvm_page_owner_locked_p()
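
The change is a pure rename; the helper's semantics are unchanged.  The
following is a minimal sketch of the renamed helper and a typical assertion
site, modelled on the rump kernel variant and the KASSERT call sites in the
diff below; the caller example_touch_page() is hypothetical and included
only for illustration.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>

/*
 * uvm_page_owner_locked_p: return true if the object associated with
 * the page is locked.  A weak check for runtime assertions only
 * (this mirrors the rump kernel variant in this change).
 */
bool
uvm_page_owner_locked_p(struct vm_page *pg)
{

        return mutex_owned(pg->uobject->vmobjlock);
}

/*
 * Hypothetical caller: assert that the owning object's lock is held
 * before touching page state, as the updated KASSERT sites do.
 */
static void
example_touch_page(struct vm_page *pg)
{

        KASSERT(uvm_page_owner_locked_p(pg));
        /* ... modify per-page state protected by the object's lock ... */
}

The uvm_page.c variant additionally checks whether the page is owned by a
uvm_object at all (see the truncated hunk below) before testing the lock.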

diffstat:

 sys/arch/hppa/hppa/pmap.c       |   8 ++++----
 sys/arch/x86/x86/pmap.c         |  18 +++++++++---------
 sys/miscfs/genfs/genfs_io.c     |   6 +++---
 sys/rump/librump/rumpkern/vm.c  |   8 ++++----
 sys/uvm/uvm_page.c              |  26 +++++++++++++-------------
 sys/uvm/uvm_page.h              |   6 +++---
 sys/uvm/uvm_pdaemon.c           |   6 +++---
 sys/uvm/uvm_pdpolicy_clock.c    |   6 +++---
 sys/uvm/uvm_pdpolicy_clockpro.c |   6 +++---
 9 files changed, 45 insertions(+), 45 deletions(-)

diffs (truncated from 399 to 300 lines):

diff -r 5c9d1222b12d -r 6b1b953cddca sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Tue Dec 31 12:27:50 2019 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Tue Dec 31 12:40:27 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.101 2019/12/15 21:11:34 ad Exp $    */
+/*     $NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $    */
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.101 2019/12/15 21:11:34 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_cputype.h"
 
@@ -579,7 +579,7 @@
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
            __func__, pg, pve, pm, va, pdep, flags));
 
-       KASSERT(pm == pmap_kernel() || uvm_page_locked_p(pg));
+       KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg));
 
        pve->pv_pmap = pm;
        pve->pv_va = va | flags;
@@ -594,7 +594,7 @@
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        struct pv_entry **pve, *pv;
 
-       KASSERT(pmap == pmap_kernel() || uvm_page_locked_p(pg));
+       KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg));
 
        for (pv = *(pve = &md->pvh_list);
            pv; pv = *(pve = &(*pve)->pv_next)) {
diff -r 5c9d1222b12d -r 6b1b953cddca sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Tue Dec 31 12:27:50 2019 +0000
+++ b/sys/arch/x86/x86/pmap.c   Tue Dec 31 12:40:27 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.348 2019/12/22 15:15:20 ad Exp $    */
+/*     $NetBSD: pmap.c,v 1.349 2019/12/31 12:40:27 ad Exp $    */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017, 2019 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.348 2019/12/22 15:15:20 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.349 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -3530,7 +3530,7 @@
        }
 
        if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
-               KASSERT(uvm_page_locked_p(pg));
+               KASSERT(uvm_page_owner_locked_p(pg));
                pp = VM_PAGE_TO_PP(pg);
        } else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
                paddr_t pa = pmap_pte2pa(opte);
@@ -3868,7 +3868,7 @@
        struct pmap_page *pp;
        paddr_t pa;
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
 
        pp = VM_PAGE_TO_PP(pg);
        pa = VM_PAGE_TO_PHYS(pg);
@@ -3909,7 +3909,7 @@
        u_int result;
        paddr_t pa;
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
 
        pp = VM_PAGE_TO_PP(pg);
        if ((pp->pp_attrs & testbits) != 0) {
@@ -3982,7 +3982,7 @@
        struct pmap_page *pp;
        paddr_t pa;
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
 
        pp = VM_PAGE_TO_PP(pg);
        pa = VM_PAGE_TO_PHYS(pg);
@@ -4374,7 +4374,7 @@
         */
        if ((~opte & (PTE_P | PTE_PVLIST)) == 0) {
                if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
-                       KASSERT(uvm_page_locked_p(old_pg));
+                       KASSERT(uvm_page_owner_locked_p(old_pg));
                        old_pp = VM_PAGE_TO_PP(old_pg);
                } else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
                        panic("%s: PTE_PVLIST with pv-untracked page"
@@ -5271,7 +5271,7 @@
         */
        if ((~opte & (EPT_R | EPT_PVLIST)) == 0) {
                if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
-                       KASSERT(uvm_page_locked_p(old_pg));
+                       KASSERT(uvm_page_owner_locked_p(old_pg));
                        old_pp = VM_PAGE_TO_PP(old_pg);
                } else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
                        panic("%s: EPT_PVLIST with pv-untracked page"
@@ -5444,7 +5444,7 @@
        }
 
        if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
-               KASSERT(uvm_page_locked_p(pg));
+               KASSERT(uvm_page_owner_locked_p(pg));
                pp = VM_PAGE_TO_PP(pg);
        } else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
                paddr_t pa = pmap_pte2pa(opte);
diff -r 5c9d1222b12d -r 6b1b953cddca sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c       Tue Dec 31 12:27:50 2019 +0000
+++ b/sys/miscfs/genfs/genfs_io.c       Tue Dec 31 12:40:27 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: genfs_io.c,v 1.81 2019/12/16 18:17:32 ad Exp $ */
+/*     $NetBSD: genfs_io.c,v 1.82 2019/12/31 12:40:27 ad Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.81 2019/12/16 18:17:32 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.82 2019/12/31 12:40:27 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -75,7 +75,7 @@
 
                if (pg == NULL || pg == PGO_DONTCARE)
                        continue;
-               KASSERT(uvm_page_locked_p(pg));
+               KASSERT(uvm_page_owner_locked_p(pg));
                if (pg->flags & PG_FAKE) {
                        pg->flags |= PG_RELEASED;
                }
diff -r 5c9d1222b12d -r 6b1b953cddca sys/rump/librump/rumpkern/vm.c
--- a/sys/rump/librump/rumpkern/vm.c    Tue Dec 31 12:27:50 2019 +0000
+++ b/sys/rump/librump/rumpkern/vm.c    Tue Dec 31 12:40:27 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: vm.c,v 1.177 2019/12/21 12:59:12 ad Exp $      */
+/*     $NetBSD: vm.c,v 1.178 2019/12/31 12:40:27 ad Exp $      */
 
 /*
  * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.177 2019/12/21 12:59:12 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.178 2019/12/31 12:40:27 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -238,12 +238,12 @@
 }
 
 /*
- * uvm_page_locked_p: return true if object associated with page is
+ * uvm_page_owner_locked_p: return true if object associated with page is
  * locked.  this is a weak check for runtime assertions only.
  */
 
 bool
-uvm_page_locked_p(struct vm_page *pg)
+uvm_page_owner_locked_p(struct vm_page *pg)
 {
 
        return mutex_owned(pg->uobject->vmobjlock);
diff -r 5c9d1222b12d -r 6b1b953cddca sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c        Tue Dec 31 12:27:50 2019 +0000
+++ b/sys/uvm/uvm_page.c        Tue Dec 31 12:40:27 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page.c,v 1.217 2019/12/30 17:45:53 ad Exp $        */
+/*     $NetBSD: uvm_page.c,v 1.218 2019/12/31 12:40:27 ad Exp $        */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.217 2019/12/30 17:45:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.218 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -1304,7 +1304,7 @@
        pg->offset = off;
        pg->uobject = obj;
        pg->uanon = anon;
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
        pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
        if (anon) {
                anon->an_page = pg;
@@ -1636,7 +1636,7 @@
                        continue;
                }
 
-               KASSERT(uvm_page_locked_p(pg));
+               KASSERT(uvm_page_owner_locked_p(pg));
                KASSERT(pg->flags & PG_BUSY);
                KASSERT((pg->flags & PG_PAGEOUT) == 0);
                if (pg->flags & PG_WANTED) {
@@ -1676,7 +1676,7 @@
 
        KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
        KASSERT((pg->flags & PG_WANTED) == 0);
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
 
        /* gain ownership? */
        if (tag) {
@@ -1750,7 +1750,7 @@
 uvm_pagewire(struct vm_page *pg)
 {
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
 #if defined(READAHEAD_STATS)
        if ((pg->flags & PG_READAHEAD) != 0) {
                uvm_ra_hit.ev_count++;
@@ -1778,7 +1778,7 @@
 uvm_pageunwire(struct vm_page *pg)
 {
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
        KASSERT(pg->wire_count != 0);
        KASSERT(!uvmpdpol_pageisqueued_p(pg));
        mutex_enter(&pg->interlock);
@@ -1804,7 +1804,7 @@
 uvm_pagedeactivate(struct vm_page *pg)
 {
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
        if (pg->wire_count == 0) {
                KASSERT(uvmpdpol_pageisqueued_p(pg));
                uvmpdpol_pagedeactivate(pg);
@@ -1821,7 +1821,7 @@
 uvm_pageactivate(struct vm_page *pg)
 {
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
 #if defined(READAHEAD_STATS)
        if ((pg->flags & PG_READAHEAD) != 0) {
                uvm_ra_hit.ev_count++;
@@ -1842,7 +1842,7 @@
 uvm_pagedequeue(struct vm_page *pg)
 {
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
        if (uvmpdpol_pageisqueued_p(pg)) {
                uvmpdpol_pagedequeue(pg);
        }
@@ -1858,7 +1858,7 @@
 uvm_pageenqueue(struct vm_page *pg)
 {
 
-       KASSERT(uvm_page_locked_p(pg));
+       KASSERT(uvm_page_owner_locked_p(pg));
        if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
                uvmpdpol_pageenqueue(pg);
        }
@@ -1919,12 +1919,12 @@
 }
 
 /*
- * uvm_page_locked_p: return true if object associated with page is
+ * uvm_page_owner_locked_p: return true if object associated with page is
  * locked.  this is a weak check for runtime assertions only.
  */
 
 bool
-uvm_page_locked_p(struct vm_page *pg)
+uvm_page_owner_locked_p(struct vm_page *pg)
 {
 
        if (pg->uobject != NULL) {
diff -r 5c9d1222b12d -r 6b1b953cddca sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h        Tue Dec 31 12:27:50 2019 +0000
+++ b/sys/uvm/uvm_page.h        Tue Dec 31 12:40:27 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page.h,v 1.90 2019/12/27 13:13:17 ad Exp $ */
+/*     $NetBSD: uvm_page.h,v 1.91 2019/12/31 12:40:27 ad Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.


