Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src Provide MI PMAP support on AARCH64



details:   https://anonhg.NetBSD.org/src/rev/60cc62d74561
branches:  trunk
changeset: 372245:60cc62d74561
user:      skrll <skrll%NetBSD.org@localhost>
date:      Thu Nov 03 09:04:56 2022 +0000

description:
Provide MI PMAP support on AARCH64

diffstat:

 etc/etc.evbarm/Makefile.inc             |    6 +-
 sys/arch/aarch64/aarch64/pmap_machdep.c |  780 ++++++++++++++++++++++++++++++++
 sys/arch/aarch64/conf/files.aarch64     |   13 +-
 sys/arch/aarch64/include/cpu.h          |   10 +-
 sys/arch/aarch64/include/pmap.h         |  285 ++++++-----
 sys/arch/aarch64/include/pmap_machdep.h |  519 +++++++++++++++++++++
 sys/arch/aarch64/include/types.h        |   11 +-
 sys/arch/evbarm/conf/GENERIC64_PMAPMI   |   12 +
 sys/uvm/pmap/pmap.c                     |   36 +-
 sys/uvm/pmap/pmap.h                     |    4 +-
 10 files changed, 1529 insertions(+), 147 deletions(-)

diffs (truncated from 1901 to 300 lines):

diff -r a8782a995570 -r 60cc62d74561 etc/etc.evbarm/Makefile.inc
--- a/etc/etc.evbarm/Makefile.inc       Wed Nov 02 20:38:21 2022 +0000
+++ b/etc/etc.evbarm/Makefile.inc       Thu Nov 03 09:04:56 2022 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: Makefile.inc,v 1.130 2022/10/29 08:52:47 jmcneill Exp $
+#      $NetBSD: Makefile.inc,v 1.131 2022/11/03 09:04:56 skrll Exp $
 #
 #      etc.evbarm/Makefile.inc -- evbarm-specific etc Makefile targets
 #
@@ -31,7 +31,9 @@
 
 KERNEL_SETS.armv7+=            GENERIC
 KERNEL_SETS.armv7hf+=          GENERIC
+
 KERNEL_SETS.arm64+=            GENERIC64
+KERNEL_SETS.arm64+=            GENERIC64_PMAPMI
 .else
 IMAGEENDIAN=   le
 # little endian boards
@@ -65,7 +67,9 @@
 
 KERNEL_SETS.armv7+=            GENERIC
 KERNEL_SETS.armv7hf+=          GENERIC
+
 KERNEL_SETS.arm64+=            GENERIC64
+KERNEL_SETS.arm64+=            GENERIC64_PMAPMI
 .endif
 
 IMAGE.rel=     ${RELEASEDIR}/${RELEASEMACHINEDIR}
diff -r a8782a995570 -r 60cc62d74561 sys/arch/aarch64/aarch64/pmap_machdep.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/aarch64/aarch64/pmap_machdep.c   Thu Nov 03 09:04:56 2022 +0000
@@ -0,0 +1,780 @@
+/*     $NetBSD: pmap_machdep.c,v 1.1 2022/11/03 09:04:56 skrll Exp $   */
+
+/*-
+ * Copyright (c) 2022 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Nick Hudson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_arm_debug.h"
+#include "opt_efi.h"
+#include "opt_multiprocessor.h"
+#include "opt_uvmhist.h"
+
+#define __PMAP_PRIVATE
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.1 2022/11/03 09:04:56 skrll Exp $");
+
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include <sys/buf.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_page.h>
+#include <uvm/pmap/pmap_pvt.h>
+
+#include <aarch64/cpufunc.h>
+
+#include <arm/locore.h>
+
+#ifdef VERBOSE_INIT_ARM
+#define VPRINTF(...)   printf(__VA_ARGS__)
+#else
+#define VPRINTF(...)   __nothing
+#endif
+
+/* Set to LX_BLKPAG_GP if supported. */
+uint64_t pmap_attr_gp = 0;
+
+/*
+ * Misc variables
+ */
+vaddr_t virtual_avail;
+vaddr_t virtual_end;
+
+bool pmap_devmap_bootstrap_done = false;
+
+paddr_t
+vtophys(vaddr_t va)
+{
+       paddr_t pa;
+
+       if (pmap_extract(pmap_kernel(), va, &pa) == false)
+               return 0;
+       return pa;
+}
+
+bool
+pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp)
+{
+       paddr_t pa;
+       bool coherency = false;
+
+       if (pm == pmap_kernel()) {
+               if (pmap_md_direct_mapped_vaddr_p(va)) {
+                       pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
+                       goto done;
+               }
+               if (pmap_md_io_vaddr_p(va))
+                       panic("pmap_extract: io address %#"PRIxVADDR"", va);
+
+               if (va >= pmap_limits.virtual_end)
+                       panic("%s: illegal kernel mapped address %#"PRIxVADDR,
+                           __func__, va);
+       }
+
+       kpreempt_disable();
+       const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
+       pt_entry_t pte;
+
+       if (ptep == NULL || !pte_valid_p(pte = *ptep)) {
+               kpreempt_enable();
+               return false;
+       }
+       kpreempt_enable();
+
+       pa = pte_to_paddr(pte) | (va & PGOFSET);
+
+       switch (pte & LX_BLKPAG_ATTR_MASK) {
+       case LX_BLKPAG_ATTR_NORMAL_NC:
+       case LX_BLKPAG_ATTR_DEVICE_MEM:
+       case LX_BLKPAG_ATTR_DEVICE_MEM_NP:
+               coherency = true;
+               break;
+       }
+
+ done:
+       if (pap != NULL) {
+               *pap = pa;
+       }
+       if (coherentp != NULL) {
+               *coherentp = coherency;
+       }
+       return true;
+}
+
+
+bool
+pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, bool user)
+{
+       UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
+
+       KASSERT(!user || (pm != pmap_kernel()));
+
+       UVMHIST_LOG(pmaphist, " pm=%#jx, va=%#jx, ftype=%#jx, user=%jd",
+           (uintptr_t)pm, va, ftype, user);
+       UVMHIST_LOG(pmaphist, " ti=%#jx pai=%#jx asid=%#jx",
+           (uintptr_t)cpu_tlb_info(curcpu()),
+           (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())),
+           (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
+
+       kpreempt_disable();
+
+       bool fixed = false;
+       pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
+       if (ptep == NULL) {
+               UVMHIST_LOG(pmaphist, "... no ptep", 0, 0, 0, 0);
+               goto done;
+       }
+
+       const pt_entry_t opte = *ptep;
+       if (!l3pte_valid(opte)) {
+               UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
+                   opte, va, 0, 0);
+               goto done;
+       }
+
+       const paddr_t pa = l3pte_pa(opte);
+       struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
+       if (pg == NULL) {
+               UVMHIST_LOG(pmaphist, "pg not found: va=%016lx", va, 0, 0, 0);
+               goto done;
+       }
+
+       struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+       UVMHIST_LOG(pmaphist, " pg=%#jx, opte=%#jx, ptep=%#jx", (uintptr_t)pg,
+           opte, (uintptr_t)ptep, 0);
+
+       if ((ftype & VM_PROT_WRITE) && (opte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) {
+               /*
+                * This looks like a good candidate for "page modified"
+                * emulation...
+                */
+               pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
+
+               /*
+                * Enable write permissions for the page by setting the Access Flag.
+                */
+               // XXXNH LX_BLKPAG_OS_0?
+               const pt_entry_t npte = opte | LX_BLKPAG_AF | LX_BLKPAG_OS_0;
+               atomic_swap_64(ptep, npte);
+               dsb(ishst);
+               fixed = true;
+
+               UVMHIST_LOG(pmaphist, " <-- done (mod emul: changed pte "
+                   "from %#jx to %#jx)", opte, npte, 0, 0);
+       } else if ((ftype & VM_PROT_READ) && (opte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO) {
+               /*
+                * This looks like a good candidate for "page referenced"
+                * emulation.
+                */
+
+               pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
+
+               /*
+                * Enable access to the page by setting the Access Flag.
+                */
+               const pt_entry_t npte = opte | LX_BLKPAG_AF;
+               atomic_swap_64(ptep, npte);
+               dsb(ishst);
+               fixed = true;
+
+               UVMHIST_LOG(pmaphist, " <-- done (ref emul: changed pte "
+                   "from %#jx to %#jx)", opte, npte, 0, 0);
+       }
+
+done:
+       kpreempt_enable();
+
+       return fixed;
+}
+
+
+void
+pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
+{
+       UVMHIST_FUNC(__func__);
+       UVMHIST_CALLARGS(pmaphist, "pm %#jx sva %#jx eva %#jx",
+          (uintptr_t)pm, sva, eva, 0);
+
+       KASSERT((sva & PAGE_MASK) == 0);
+       KASSERT((eva & PAGE_MASK) == 0);
+
+       pmap_lock(pm);
+
+       for (vaddr_t va = sva; va < eva; va += PAGE_SIZE) {
+               pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
+               if (ptep == NULL)
+                       continue;
+
+               pt_entry_t opte = *ptep;
+               if (!l3pte_valid(opte)) {
+                       UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
+                           opte, va, 0, 0);
+                       goto done;
+               }
+
+               if (l3pte_readable(opte)) {
+                       cpu_icache_sync_range(va, PAGE_SIZE);
+               } else {
+                       /*
+                        * change to accessible temporarily
+                        * to do cpu_icache_sync_range()
+                        */
+                       struct pmap_asid_info * const pai = PMAP_PAI(pm,
+                           cpu_tlb_info(ci));
+
+                       atomic_swap_64(ptep, opte | LX_BLKPAG_AF);
+                       // tlb_invalidate_addr does the dsb(ishst);
+                       tlb_invalidate_addr(pai->pai_asid, va);
+                       cpu_icache_sync_range(va, PAGE_SIZE);
+                       atomic_swap_64(ptep, opte);
+                       tlb_invalidate_addr(pai->pai_asid, va);
+               }
+       }
+done:
+       pmap_unlock(pm);
+}
+
+
+struct vm_page *



Home | Main Index | Thread Index | Old Index