Source-Changes-HG archive


[src/trunk]: src/sys/uvm/pmap Use UVMHIST_CALLARGS



details:   https://anonhg.NetBSD.org/src/rev/3755db998d60
branches:  trunk
changeset: 1009092:3755db998d60
user:      skrll <skrll%NetBSD.org@localhost>
date:      Sun Apr 12 15:36:18 2020 +0000

description:
Use UVMHIST_CALLARGS

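The change is mechanical: call sites that logged their entry and their arguments
with two macros (UVMHIST_CALLED followed by UVMHIST_LOG) now use the combined
UVMHIST_CALLARGS macro, which records the call event together with its argument
log in a single invocation.  A minimal before/after sketch of the pattern, taken
from the pmap_destroy() hunk below (surrounding function body elided for
illustration):

	/* before: separate "called" event plus a log record for the arguments */
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);

	/* after: one macro logs the call and its arguments together */
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);

Functions that log no arguments (e.g. pmap_create(), pmap_copy()) keep
UVMHIST_CALLED and only gain the two-line layout.
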
diffstat:

 sys/uvm/pmap/pmap.c     |  109 +++++++++++++++++++++++------------------------
 sys/uvm/pmap/pmap_tlb.c |   37 ++++++++-------
 2 files changed, 73 insertions(+), 73 deletions(-)

diffs (truncated from 405 to 300 lines):

diff -r bb2d105d9b1f -r 3755db998d60 sys/uvm/pmap/pmap.c
--- a/sys/uvm/pmap/pmap.c       Sun Apr 12 13:57:07 2020 +0000
+++ b/sys/uvm/pmap/pmap.c       Sun Apr 12 15:36:18 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.48 2020/03/14 14:05:44 ad Exp $     */
+/*     $NetBSD: pmap.c,v 1.49 2020/04/12 15:36:18 skrll Exp $  */
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.48 2020/03/14 14:05:44 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.49 2020/04/12 15:36:18 skrll Exp $");
 
 /*
  *     Manages physical address maps.
@@ -581,7 +581,8 @@
        UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
        UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLED(pmaphist);
 
        /*
         * Initialize the segtab lock.
@@ -623,7 +624,8 @@
 pmap_t
 pmap_create(void)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLED(pmaphist);
        PMAP_COUNT(create);
 
        pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
@@ -658,8 +660,8 @@
 void
 pmap_destroy(pmap_t pmap)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
 
        if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
                PMAP_COUNT(dereference);
@@ -694,8 +696,8 @@
 void
 pmap_reference(pmap_t pmap)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
        PMAP_COUNT(reference);
 
        if (pmap != NULL) {
@@ -713,8 +715,8 @@
 {
        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
            (uintptr_t)pmap, 0, 0);
        PMAP_COUNT(activate);
 
@@ -744,9 +746,8 @@
        VM_PAGEMD_PVLIST_LOCK(mdpg);
        pmap_pvlist_check(mdpg);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-
-       UVMHIST_LOG(pmapexechist, "pg %#jx (pa %#jx) [page removed]: "
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmapexechist, "pg %#jx (pa %#jx) [page removed]: "
            "execpage cleared", (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
        pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED);
@@ -862,8 +863,8 @@
 {
        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
            (uintptr_t)pmap, 0, 0);
        PMAP_COUNT(deactivate);
 
@@ -885,8 +886,8 @@
 void
 pmap_update(struct pmap *pmap)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
        PMAP_COUNT(update);
 
        kpreempt_disable();
@@ -930,10 +931,10 @@
        const pt_entry_t npte = flags;
        const bool is_kernel_pmap_p = (pmap == pmap_kernel());
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx kernel=%c va=%#jx..%#jx)",
-           (uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
-       UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)",
+           (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
+       UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
            (uintptr_t)ptep, flags, 0, 0);
 
        KASSERT(kpreempt_disabled());
@@ -977,8 +978,8 @@
        const bool is_kernel_pmap_p = (pmap == pmap_kernel());
        const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
            (uintptr_t)pmap, sva, eva, 0);
 
        if (is_kernel_pmap_p) {
@@ -1011,8 +1012,8 @@
        pv_entry_t pv;
        vaddr_t va;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
        PMAP_COUNT(page_protect);
 
@@ -1074,8 +1075,8 @@
 {
        const vm_prot_t prot = (flags & VM_PROT_ALL);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)",
            (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
        UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
            (uintptr_t)ptep, flags, 0, 0);
@@ -1132,8 +1133,8 @@
 void
 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
            (uintptr_t)pmap, sva, eva, prot);
        PMAP_COUNT(protect);
 
@@ -1165,8 +1166,8 @@
 {
        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx) cached=%jd)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) cached=%jd)",
            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), cached, 0);
 
        KASSERT(kpreempt_disabled());
@@ -1226,8 +1227,8 @@
            ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
 #endif
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(*histp);
-       UVMHIST_LOG(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
            (uintptr_t)pmap, va, pa, 0);
        UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);
 
@@ -1376,8 +1377,8 @@
        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
        struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
            va, pa, prot, flags);
        PMAP_COUNT(kenter_pa);
 
@@ -1442,9 +1443,8 @@
 {
        const pt_entry_t new_pte = pte_nv_entry(true);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist,
-           "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
            (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);
 
        KASSERT(kpreempt_disabled());
@@ -1479,8 +1479,8 @@
        const vaddr_t sva = trunc_page(va);
        const vaddr_t eva = round_page(va + len);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);
 
        kpreempt_disable();
        pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
@@ -1492,8 +1492,8 @@
 bool
 pmap_remove_all(struct pmap *pmap)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);
 
        KASSERT(pmap != pmap_kernel());
 
@@ -1537,8 +1537,8 @@
 void
 pmap_unwire(pmap_t pmap, vaddr_t va)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
            0, 0);
        PMAP_COUNT(unwire);
 
@@ -1621,7 +1621,8 @@
 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
     vaddr_t src_addr)
 {
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLED(pmaphist);
        PMAP_COUNT(copy);
 }
 
@@ -1635,8 +1636,8 @@
 {
        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx))",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))",
           (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
 
        bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
@@ -1668,8 +1669,8 @@
        pv_entry_t pv = &mdpg->mdpg_first;
        pv_entry_t pv_next;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pg=%#jx (%#jx))",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))",
            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
        PMAP_COUNT(clear_modify);
 
@@ -1814,9 +1815,8 @@
        bool first = false;
 #endif
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist,
-           "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
            (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
        UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
            (uintptr_t)nptep, pte_value(*nptep), 0, 0);
@@ -1957,9 +1957,8 @@
        pv_entry_t pv, npv;
        bool last;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist,
-           "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
            (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
        UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);
 
diff -r bb2d105d9b1f -r 3755db998d60 sys/uvm/pmap/pmap_tlb.c
--- a/sys/uvm/pmap/pmap_tlb.c   Sun Apr 12 13:57:07 2020 +0000


