Source-Changes-HG archive


[src/trunk]: src/sys/uvm/pmap Support __HAVE_PMAP_PV_TRACK in sys/uvm/pmap ba...



details:   https://anonhg.NetBSD.org/src/rev/178e9d3b9b7d
branches:  trunk
changeset: 948203:178e9d3b9b7d
user:      skrll <skrll@NetBSD.org>
date:      Sun Dec 20 16:38:25 2020 +0000

description:
Support __HAVE_PMAP_PV_TRACK in sys/uvm/pmap based pmaps (aka common pmap)
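
__HAVE_PMAP_PV_TRACK lets a pmap keep pv lists for pages that have no
struct vm_page behind them (e.g. device memory a driver registers), so
such mappings can still be found and torn down by physical address.  A
minimal sketch of how a caller would use the MI interface from
<uvm/pmap/pmap_pvt.h>, which the mips diff below starts pulling in (the
exdev_* driver names are made up for illustration; check the exact
prototypes against the tree):

/*
 * Hypothetical driver glue: register a range of unmanaged device
 * pages with the pmap so it tracks P->V entries for them.
 */
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <uvm/pmap/pmap_pvt.h>

static void
exdev_attach_vram(paddr_t pa, psize_t size)
{
	/* Start pv tracking for [pa, pa + size). */
	pmap_pv_track(pa, size);
}

static void
exdev_detach_vram(paddr_t pa, psize_t size)
{
	/* Stop tracking once the range is no longer mapped. */
	pmap_pv_untrack(pa, size);
}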

diffstat:

 sys/arch/m68k/include/pmap_coldfire.h |    8 +-
 sys/arch/mips/include/pmap.h          |   18 ++++-
 sys/arch/mips/mips/pmap_machdep.c     |   98 +++++++++++++++++--------------
 sys/arch/powerpc/booke/booke_pmap.c   |   10 ++-
 sys/arch/powerpc/include/booke/pmap.h |   10 +-
 sys/arch/riscv/include/pmap.h         |   12 +-
 sys/arch/riscv/riscv/trap.c           |    7 +-
 sys/uvm/pmap/pmap.c                   |  106 ++++++++++++++++++++++++---------
 sys/uvm/pmap/pmap.h                   |    7 +-
 sys/uvm/pmap/vmpagemd.h               |   17 +++--
 sys/uvm/uvm_page.h                    |    3 +-
 11 files changed, 187 insertions(+), 109 deletions(-)

diffs (truncated from 837 to 300 lines):

diff -r 04775be44e42 -r 178e9d3b9b7d sys/arch/m68k/include/pmap_coldfire.h
--- a/sys/arch/m68k/include/pmap_coldfire.h     Sun Dec 20 15:59:28 2020 +0000
+++ b/sys/arch/m68k/include/pmap_coldfire.h     Sun Dec 20 16:38:25 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap_coldfire.h,v 1.3 2020/08/07 07:19:45 skrll Exp $  */
+/*     $NetBSD: pmap_coldfire.h,v 1.4 2020/12/20 16:38:25 skrll Exp $  */
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -92,7 +92,7 @@
 #endif
 #endif
 
-void   pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
+void   pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
 vaddr_t        pmap_bootstrap(vaddr_t, vaddr_t, phys_ram_seg_t *, size_t);
 bool   pmap_extract(struct pmap *, vaddr_t, paddr_t *);
 
@@ -114,7 +114,7 @@
  * Virtual Cache Alias helper routines.  Not a problem for Booke CPUs.
  */
 static inline bool
-pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *nptep)
+pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
 {
        return false;
 }
@@ -126,7 +126,7 @@
 }
 
 static inline void
-pmap_md_vca_clean(struct vm_page *pg, vaddr_t va, int op)
+pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
 {
 }
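
Across these headers the machine-dependent hooks change from taking a
struct vm_page * to taking a struct vm_page_md *, since a pv-tracked
pmap_page has no vm_page to pass.  A sketch of the new hook shape,
using the VM_PAGEMD_VMPAGE_P()/VM_MD_TO_PAGE() accessors that appear
in the mips hunks below (example_md_hook is a made-up name):

static void
example_md_hook(struct vm_page_md *mdpg)
{
	if (VM_PAGEMD_VMPAGE_P(mdpg)) {
		/* Backed by a managed page; safe to convert back. */
		struct vm_page * const pg = VM_MD_TO_PAGE(mdpg);
		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
		(void)pa;	/* ... cache maintenance keyed on pa ... */
	} else {
		/* pv-tracked page: there is no struct vm_page. */
	}
}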
 
diff -r 04775be44e42 -r 178e9d3b9b7d sys/arch/mips/include/pmap.h
--- a/sys/arch/mips/include/pmap.h      Sun Dec 20 15:59:28 2020 +0000
+++ b/sys/arch/mips/include/pmap.h      Sun Dec 20 16:38:25 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.74 2020/08/17 03:19:35 mrg Exp $    */
+/*     $NetBSD: pmap.h,v 1.75 2020/12/20 16:38:25 skrll Exp $  */
 
 /*
  * Copyright (c) 1992, 1993
@@ -93,6 +93,7 @@
 #define        KERNEL_PID                      0
 
 #if defined(__PMAP_PRIVATE)
+struct vm_page_md;
 
 #include <mips/locore.h>
 #include <mips/cache.h>
@@ -122,9 +123,9 @@
 void   pmap_md_init(void);
 void   pmap_md_icache_sync_all(void);
 void   pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
-void   pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
-bool   pmap_md_vca_add(struct vm_page *, vaddr_t, pt_entry_t *);
-void   pmap_md_vca_clean(struct vm_page *, int);
+void   pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
+bool   pmap_md_vca_add(struct vm_page_md *, vaddr_t, pt_entry_t *);
+void   pmap_md_vca_clean(struct vm_page_md *, int);
 void   pmap_md_vca_remove(struct vm_page *, vaddr_t, bool, bool);
 bool   pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
 bool   pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
@@ -178,6 +179,7 @@
 #include <uvm/uvm_pmap.h>
 #include <uvm/pmap/vmpagemd.h>
 #include <uvm/pmap/pmap.h>
+#include <uvm/pmap/pmap_pvt.h>
 #include <uvm/pmap/pmap_tlb.h>
 #include <uvm/pmap/pmap_synci.h>
 
@@ -270,5 +272,13 @@
 #define        PMAP_CCA_FOR_PA(pa)     sbmips_cca_for_pa(pa)
 #endif
 
+#ifdef __HAVE_PMAP_PV_TRACK
+struct pmap_page {
+        struct vm_page_md       pp_md;
+};
+
+#define PMAP_PAGE_TO_MD(ppage)  (&((ppage)->pp_md))
+#endif
+
 #endif /* _KERNEL */
 #endif /* _MIPS_PMAP_H_ */
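
With struct pmap_page defined as a bare wrapper around vm_page_md, the
common pmap can resolve any physical address to its vm_page_md whether
the page is UVM-managed or only pv-tracked.  A hypothetical helper
showing that lookup pattern (pa_to_md is a made-up name;
PHYS_TO_VM_PAGE, VM_PAGE_TO_MD, pmap_pv_tracked and PMAP_PAGE_TO_MD
are the existing accessors):

static inline struct vm_page_md *
pa_to_md(paddr_t pa)
{
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	if (pg != NULL)
		return VM_PAGE_TO_MD(pg);	/* managed page */
#ifdef __HAVE_PMAP_PV_TRACK
	struct pmap_page * const pp = pmap_pv_tracked(pa);
	if (pp != NULL)
		return PMAP_PAGE_TO_MD(pp);	/* pv-tracked page */
#endif
	return NULL;			/* neither managed nor tracked */
}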
diff -r 04775be44e42 -r 178e9d3b9b7d sys/arch/mips/mips/pmap_machdep.c
--- a/sys/arch/mips/mips/pmap_machdep.c Sun Dec 20 15:59:28 2020 +0000
+++ b/sys/arch/mips/mips/pmap_machdep.c Sun Dec 20 16:38:25 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap_machdep.c,v 1.32 2020/12/20 15:59:28 skrll Exp $  */
+/*     $NetBSD: pmap_machdep.c,v 1.33 2020/12/20 16:38:25 skrll Exp $  */
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.32 2020/12/20 15:59:28 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.33 2020/12/20 16:38:25 skrll Exp $");
 
 /*
  *     Manages physical address maps.
@@ -170,17 +170,19 @@
 #define pmap_md_cache_indexof(x)       (((vaddr_t)(x)) & pmap_page_cache_alias_mask)
 
 static register_t
-pmap_md_map_ephemeral_page(struct vm_page *pg, bool locked_p, int prot,
+pmap_md_map_ephemeral_page(struct vm_page_md *mdpg, bool locked_p, int prot,
     pt_entry_t *old_pte_p)
 {
+       KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
+
+       struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
        const paddr_t pa = VM_PAGE_TO_PHYS(pg);
-       struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
        pv_entry_t pv = &mdpg->mdpg_first;
        register_t va = 0;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pg=%p, prot=%d, ptep=%p)",
-           pg, prot, old_pte_p, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, prot=%d, ptep=%#jx)",
+           (uintptr_t)pg, prot, (uintptr_t)old_pte_p, 0);
 
        KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
 
@@ -260,15 +262,16 @@
 }
 
 static void
-pmap_md_unmap_ephemeral_page(struct vm_page *pg, bool locked_p, register_t va,
-       pt_entry_t old_pte)
+pmap_md_unmap_ephemeral_page(struct vm_page_md *mdpg, bool locked_p,
+    register_t va, pt_entry_t old_pte)
 {
-       struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+       KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
+
        pv_entry_t pv = &mdpg->mdpg_first;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pg=%p, va=%#lx, pte=%#"PRIxPTE")",
-           pg, va, pte_value(old_pte), 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, va=%#lx, pte=%#"PRIxPTE")",
+           (uintptr_t)VM_MD_TO_PAGE(mdpg), va, pte_value(old_pte), 0);
 
        KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
 
@@ -305,17 +308,17 @@
 }
 
 static void
-pmap_md_vca_page_wbinv(struct vm_page *pg, bool locked_p)
+pmap_md_vca_page_wbinv(struct vm_page_md *mdpg, bool locked_p)
 {
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
        pt_entry_t pte;
 
-       const register_t va = pmap_md_map_ephemeral_page(pg, locked_p,
+       const register_t va = pmap_md_map_ephemeral_page(mdpg, locked_p,
            VM_PROT_READ, &pte);
 
        mips_dcache_wbinv_range(va, PAGE_SIZE);
 
-       pmap_md_unmap_ephemeral_page(pg, locked_p, va, pte);
+       pmap_md_unmap_ephemeral_page(mdpg, locked_p, va, pte);
 }
 
 bool
@@ -609,20 +612,21 @@
 {
        pt_entry_t dst_pte;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
        PMAP_COUNT(zeroed_pages);
 
        struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
+       struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
 
-       KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(dst_pg)));
+       KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
 
-       const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
+       const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
            VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
 
        mips_pagezero(dst_va);
 
-       pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);
+       pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
 
        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
 }
@@ -635,39 +639,41 @@
 {
        pt_entry_t src_pte, dst_pte;
 
-       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa, 0, 0);
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa,
+           0, 0);
        PMAP_COUNT(copied_pages);
 
        struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src_pa);
        struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
 
-       const register_t src_va = pmap_md_map_ephemeral_page(src_pg, false,
+       struct vm_page_md * const src_mdpg = VM_PAGE_TO_MD(src_pg);
+       struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
+
+       const register_t src_va = pmap_md_map_ephemeral_page(src_mdpg, false,
            VM_PROT_READ, &src_pte);
 
-       KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(VM_PAGE_TO_MD(dst_pg)));
-       KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(dst_pg)));
-       const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
+       KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(dst_mdpg));
+       KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
+       const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
            VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
 
        mips_pagecopy(dst_va, src_va);
 
-       pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);
-       pmap_md_unmap_ephemeral_page(src_pg, false, src_va, src_pte);
+       pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
+       pmap_md_unmap_ephemeral_page(src_mdpg, false, src_va, src_pte);
 
        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
 }
 
 void
-pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *onproc)
+pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
 {
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
        struct mips_options * const opts = &mips_options;
        if (opts->mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT)
                return;
 
-       struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
-
        /*
         * If onproc is empty, we could do a
         * pmap_page_protect(pg, VM_PROT_NONE) and remove all
@@ -679,16 +685,19 @@
                if (VM_PAGEMD_CACHED_P(mdpg)) {
                        /* This was probably mapped cached by UBC so flush it */
                        pt_entry_t pte;
-                       const register_t tva = pmap_md_map_ephemeral_page(pg, false,
-                           VM_PROT_READ, &pte);
+                       const register_t tva = pmap_md_map_ephemeral_page(mdpg,
+                           false, VM_PROT_READ, &pte);
 
                        UVMHIST_LOG(pmaphist, "  va %#"PRIxVADDR, tva, 0, 0, 0);
                        mips_dcache_wbinv_range(tva, PAGE_SIZE);
                        mips_icache_sync_range(tva, PAGE_SIZE);
 
-                       pmap_md_unmap_ephemeral_page(pg, false, tva, pte);
+                       pmap_md_unmap_ephemeral_page(mdpg, false, tva, pte);
                }
        } else {
+               KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
+
+               struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
                mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
                    PAGE_SIZE);
        }
@@ -738,7 +747,7 @@
                 */
                if (MIPS_CACHE_VIRTUAL_ALIAS
                    && mips_cache_badalias(last_va, va)) {
-                       pmap_md_vca_page_wbinv(pg, false);
+                       pmap_md_vca_page_wbinv(mdpg, false);
                }
 
                pv->pv_va = va;
@@ -910,10 +919,9 @@
 }
 
 bool
-pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *ptep)
+pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *ptep)
 {
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
        if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
                return false;
 
@@ -979,8 +987,8 @@


