Source-Changes-HG archive


[src/trunk]: src/sys/arch/aarch64/aarch64 fix panic when modload.



details:   https://anonhg.NetBSD.org/src/rev/8063d51e21aa
branches:  trunk
changeset: 466967:8063d51e21aa
user:      ryo <ryo%NetBSD.org@localhost>
date:      Wed Jan 08 05:41:07 2020 +0000

description:
fix panic on modload.

>panic: kernel diagnostic assertion "!pmap_extract(pmap_kernel(), loopva, NULL)" failed: file "../../../../uvm/uvm_km.c", line 674 loopva=0xffffffc001000000

The space allocated by bootpage_alloc() is used only as physical pages for
pagetable pages, so there is no need to map it into KVA, and kernend_extra
should never have consumed any KVA space.
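
For illustration only (not part of the commit): a minimal C sketch of the kind of
boot-time allocator the description refers to. The names used here (bootpage_base,
bootpage_used, BOOTPAGE_MAX, bootpage_alloc_page) are hypothetical and do not
reflect NetBSD's actual bootpage_alloc() implementation; the sketch only shows the
property the fix relies on: the allocator deals purely in physical addresses, so
the region it draws from needs no kernel virtual mapping of its own and should not
be counted against kernel VA.

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define BOOTPAGE_MAX	(64UL * PAGE_SIZE)	/* hypothetical size of the reserved physical region */

typedef uint64_t paddr_t;

static paddr_t bootpage_base;	/* physical address just past the kernel image */
static size_t  bootpage_used;	/* bytes consumed so far (cf. kernend_extra) */

/*
 * Hand out one physical page to be used as a pagetable page.
 * Returns 0 when the reserved region is exhausted.
 */
static paddr_t
bootpage_alloc_page(void)
{
	paddr_t pa;

	if (bootpage_used + PAGE_SIZE > BOOTPAGE_MAX)
		return 0;
	pa = bootpage_base + bootpage_used;
	bootpage_used += PAGE_SIZE;
	return pa;
}

A caller only ever writes the returned physical address into a table entry, which
is why neither the extra KVA mapping set up in locore.S nor counting kernend_extra
into the kernel VA range was necessary.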

diffstat:

 sys/arch/aarch64/aarch64/aarch64_machdep.c |  20 ++++++++++----------
 sys/arch/aarch64/aarch64/locore.S          |   5 ++---
 2 files changed, 12 insertions(+), 13 deletions(-)

diffs (114 lines):

diff -r a97020a661ec -r 8063d51e21aa sys/arch/aarch64/aarch64/aarch64_machdep.c
--- a/sys/arch/aarch64/aarch64/aarch64_machdep.c        Wed Jan 08 04:53:38 2020 +0000
+++ b/sys/arch/aarch64/aarch64/aarch64_machdep.c        Wed Jan 08 05:41:07 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: aarch64_machdep.c,v 1.36 2019/12/30 15:54:55 skrll Exp $ */
+/* $NetBSD: aarch64_machdep.c,v 1.37 2020/01/08 05:41:07 ryo Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.36 2019/12/30 15:54:55 skrll Exp $");
+__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.37 2020/01/08 05:41:07 ryo Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
@@ -99,7 +99,7 @@
 /* filled in before cleaning bss. keep in .data */
 u_long kern_vtopdiff __attribute__((__section__(".data")));
 
-long kernend_extra;    /* extra memory allocated from round_page(_end[]) */
+long kernend_extra;    /* extra physical memory allocated from round_page(_end[]) */
 
 /* dump configuration */
 int    cpu_dump(void);
@@ -227,7 +227,7 @@
        pmapboot_protect(L2_TRUNC_BLOCK(kernstart),
            L2_TRUNC_BLOCK(data_start), VM_PROT_WRITE);
        pmapboot_protect(L2_ROUND_BLOCK(rodata_start),
-           L2_ROUND_BLOCK(kernend + kernend_extra), VM_PROT_EXECUTE);
+           L2_ROUND_BLOCK(kernend), VM_PROT_EXECUTE);
 
        aarch64_tlbi_all();
 
@@ -287,7 +287,7 @@
        kernstart = trunc_page((vaddr_t)__kernel_text);
        kernend = round_page((vaddr_t)_end);
        kernstart_l2 = L2_TRUNC_BLOCK(kernstart);
-       kernend_l2 = L2_ROUND_BLOCK(kernend + kernend_extra);
+       kernend_l2 = L2_ROUND_BLOCK(kernend);
        kernelvmstart = kernend_l2;
 
 #ifdef MODULAR
@@ -341,14 +341,14 @@
            "physical_start        = 0x%016lx\n"
            "kernel_start_phys     = 0x%016lx\n"
            "kernel_end_phys       = 0x%016lx\n"
+           "pagetables_start_phys = 0x%016lx\n"
+           "pagetables_end_phys   = 0x%016lx\n"
            "msgbuf                = 0x%016lx\n"
            "physical_end          = 0x%016lx\n"
            "VM_MIN_KERNEL_ADDRESS = 0x%016lx\n"
            "kernel_start_l2       = 0x%016lx\n"
            "kernel_start          = 0x%016lx\n"
            "kernel_end            = 0x%016lx\n"
-           "pagetables            = 0x%016lx\n"
-           "pagetables_end        = 0x%016lx\n"
            "kernel_end_l2         = 0x%016lx\n"
 #ifdef MODULAR
            "module_start          = 0x%016lx\n"
@@ -362,14 +362,14 @@
            physical_start,
            kernstart_phys,
            kernend_phys,
+           round_page(kernend_phys),
+           round_page(kernend_phys) + kernend_extra,
            msgbufaddr,
            physical_end,
            VM_MIN_KERNEL_ADDRESS,
            kernstart_l2,
            kernstart,
            kernend,
-           round_page(kernend),
-           round_page(kernend) + kernend_extra,
            kernend_l2,
 #ifdef MODULAR
            module_start,
@@ -553,7 +553,7 @@
 #define IN_RANGE(addr,sta,end) (((sta) <= (addr)) && ((addr) < (end)))
 
        *handled = false;
-       if (IN_RANGE(v, kernstart, kernend + kernend_extra)) {
+       if (IN_RANGE(v, kernstart, kernend)) {
                *handled = true;
                if ((v < data_start) && (prot & VM_PROT_WRITE))
                        return EFAULT;
diff -r a97020a661ec -r 8063d51e21aa sys/arch/aarch64/aarch64/locore.S
--- a/sys/arch/aarch64/aarch64/locore.S Wed Jan 08 04:53:38 2020 +0000
+++ b/sys/arch/aarch64/aarch64/locore.S Wed Jan 08 05:41:07 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.49 2019/12/28 17:19:43 jmcneill Exp $     */
+/*     $NetBSD: locore.S,v 1.50 2020/01/08 05:41:07 ryo Exp $  */
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -38,7 +38,7 @@
 #include <aarch64/hypervisor.h>
 #include "assym.h"
 
-RCSID("$NetBSD: locore.S,v 1.49 2019/12/28 17:19:43 jmcneill Exp $")
+RCSID("$NetBSD: locore.S,v 1.50 2020/01/08 05:41:07 ryo Exp $")
 
 #ifdef AARCH64_DEVICE_MEM_STRONGLY_ORDERED
 #define        MAIR_DEVICE_MEM         MAIR_DEVICE_nGnRnE
@@ -848,7 +848,6 @@
        adr     x1, start                       /* pa = start */
        ADDR    x2, _end
        sub     x2, x2, x1                      /* size = _end - start */
-       add     x2, x2, #BOOTPAGE_ALLOC_MAX     /* for bootpage_alloc() */
        ldr     x0, =start                      /* va */
        bl      pmapboot_enter
        cbnz    x0, init_mmutable_error


