Source-Changes-HG archive


[src/trunk]: src/usr.sbin/crash/arch On aarch64, ddb backtrace can be perform...



details:   https://anonhg.NetBSD.org/src/rev/91763f6a7aae
branches:  trunk
changeset: 366679:91763f6a7aae
user:      ryo <ryo%NetBSD.org@localhost>
date:      Tue Jun 07 08:08:31 2022 +0000

description:
On aarch64, a ddb backtrace can be performed without the frame pointer by specifying
the /s modifier to the ddb trace command (trace/s, bt/s).
The default remains a trace using the frame pointer (same as before).

This allows backtracing even on kernels compiled with -fomit-frame-pointer.
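
For illustration, the new modifier would be used from the ddb prompt roughly as
follows (a sketch of an interactive session, output omitted; bt is ddb's alias
for trace):

    db> trace        <- default: backtrace using the frame pointer, as before
    db> trace/s      <- backtrace without relying on the frame pointer
    db> bt/s         <- same, via the bt alias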

diffstat:

 sys/arch/aarch64/aarch64/cpuswitch.S |   22 +-
 sys/arch/aarch64/aarch64/db_trace.c  |  448 ++++++++++++++++++++++++++++++++++-
 usr.sbin/crash/arch/aarch64.c        |   11 +-
 3 files changed, 465 insertions(+), 16 deletions(-)

diffs (truncated from 626 to 300 lines):

diff -r ff6bf9980b6e -r 91763f6a7aae sys/arch/aarch64/aarch64/cpuswitch.S
--- a/sys/arch/aarch64/aarch64/cpuswitch.S      Tue Jun 07 06:06:46 2022 +0000
+++ b/sys/arch/aarch64/aarch64/cpuswitch.S      Tue Jun 07 08:08:31 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpuswitch.S,v 1.37 2022/06/07 04:12:10 ryo Exp $ */
+/* $NetBSD: cpuswitch.S,v 1.38 2022/06/07 08:08:31 ryo Exp $ */
 
 /*-
  * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
 #include "opt_ddb.h"
 #include "opt_kasan.h"
 
-RCSID("$NetBSD: cpuswitch.S,v 1.37 2022/06/07 04:12:10 ryo Exp $")
+RCSID("$NetBSD: cpuswitch.S,v 1.38 2022/06/07 08:08:31 ryo Exp $")
 
        ARMV8_DEFINE_OPTIONS
 
@@ -158,6 +158,9 @@
  * }
  */
 ENTRY_NP(cpu_switchto_softint)
+#ifdef DDB
+       mov     x7, sp                  /* keep original sp for backtrace */
+#endif
        stp     x19, x20, [sp, #-16]!   /* save */
        sub     sp, sp, #TF_SIZE        /* make switchframe */
        adr     x2, softint_cleanup     /* return address for cpu_switchto() */
@@ -180,7 +183,13 @@
 #ifdef KASAN
        /* clear the new stack */
        stp     x0, x1, [sp, #-16]!
+#ifdef DDB
+       stp     x7, lr, [sp, #-16]!     /* original sp and lr */
+#endif
        bl      _C_LABEL(kasan_softint)
+#ifdef DDB
+       ldp     x7, lr, [sp], #16
+#endif
        ldp     x0, x1, [sp], #16
 #endif
 
@@ -212,10 +221,19 @@
 #endif
        ENABLE_INTERRUPT
 
+#ifdef DDB
+       /* for db_trace.c:db_sp_trace() */
+       stp     x7, lr, [sp, #-16]!     /* push original sp,lr for backtrace info */
+#endif
+
        /* softint_dispatch(pinned_lwp, ipl) */
        mov     x0, x19                 /* x0 := pinned_lwp */
        bl      _C_LABEL(softint_dispatch)
 
+#ifdef DDB
+       add     sp, sp, #16             /* pop backtrace info */
+#endif
+
        ldr     x6, [x19, #L_PCB]       /* x6 = lwp_getpcb(curlwp) */
        ldr     x4, [x6, #PCB_TF]       /* x4 := pinned_lwp->l_addr->pcb_tf */
 #ifdef DDB
diff -r ff6bf9980b6e -r 91763f6a7aae sys/arch/aarch64/aarch64/db_trace.c
--- a/sys/arch/aarch64/aarch64/db_trace.c       Tue Jun 07 06:06:46 2022 +0000
+++ b/sys/arch/aarch64/aarch64/db_trace.c       Tue Jun 07 08:08:31 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: db_trace.c,v 1.17 2022/06/02 05:09:01 ryo Exp $ */
+/* $NetBSD: db_trace.c,v 1.18 2022/06/07 08:08:31 ryo Exp $ */
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -28,9 +28,10 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: db_trace.c,v 1.17 2022/06/02 05:09:01 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: db_trace.c,v 1.18 2022/06/07 08:08:31 ryo Exp $");
 
 #include <sys/param.h>
+#include <sys/bitops.h>
 #include <sys/proc.h>
 
 #include <aarch64/db_machdep.h>
@@ -150,6 +151,7 @@
 }
 
 #define TRACEFLAG_LOOKUPLWP    0x00000001
+#define TRACEFLAG_USERSPACE    0x00000002
 
 static void
 pr_traceaddr(const char *prefix, uint64_t frame, uint64_t pc, int flags,
@@ -182,6 +184,420 @@
        }
 }
 
+static __inline uint64_t
+SignExtend(int bitwidth, uint64_t imm, unsigned int multiply)
+{
+       const uint64_t signbit = ((uint64_t)1 << (bitwidth - 1));
+       const uint64_t immmax = signbit << 1;
+
+       if (imm & signbit)
+               imm -= immmax;
+       return imm * multiply;
+}
+
+static __inline uint64_t
+ZeroExtend(int bitwidth, uint64_t imm, unsigned int multiply)
+{
+       return imm * multiply;
+}
+
+/* rotate right. if n < 0, rotate left. */
+static __inline uint64_t
+rotate(int bitwidth, uint64_t v, int n)
+{
+       uint64_t result;
+
+       n &= (bitwidth - 1);
+       result = (((v << (bitwidth - n)) | (v >> n)));
+       if (bitwidth < 64)
+               result &= ((1ULL << bitwidth) - 1);
+       return result;
+}
+
+static __inline uint64_t
+DecodeBitMasks(uint64_t sf, uint64_t n, uint64_t imms, uint64_t immr)
+{
+       const int bitwidth = (sf == 0) ? 32 : 64;
+       uint64_t result;
+       int esize, len;
+
+       len = fls64((n << 6) + (~imms & 0x3f)) - 1;
+       esize = (1 << len);
+       imms &= (esize - 1);
+       immr &= (esize - 1);
+       result = rotate(esize, (1ULL << (imms + 1)) - 1, immr);
+       while (esize < bitwidth) {
+               result |= (result << esize);
+               esize <<= 1;
+       }
+       if (sf == 0)
+               result &= ((1ULL << bitwidth) - 1);
+       return result;
+}
+
+static int
+analyze_func(db_addr_t func_entry, db_addr_t pc, db_addr_t sp,
+    db_addr_t *lrp, vsize_t *stacksizep,
+    void (*pr)(const char *, ...) __printflike(1, 2))
+{
+       vsize_t ssize = 0, lr_off = 0;
+       db_addr_t lr = 0;
+       uint64_t alloc_by_Xn_kvalue = 0;
+       uint64_t alloc_by_Xn_kmask = 0;
+       int alloc_by_Xn_reg = -1;
+       bool found_lr_off = false;
+       bool func_entry_autodetect = false;
+
+#define MAX_BACKTRACK_ANALYZE_INSN     (1024 * 4)
+       if (func_entry == 0) {
+               if (pc > MAX_BACKTRACK_ANALYZE_INSN)
+                       func_entry = pc - MAX_BACKTRACK_ANALYZE_INSN;
+               else
+                       func_entry = 4;
+               func_entry_autodetect = true;
+       };
+
+
+       /*
+        * Locate the following instructions that allocate a stack frame.
+        * Only the following patterns are supported:
+        *
+        *  sub sp, sp, #ALLOCSIZE              -> ssize += ALLOCSIZE
+        *  sub sp, sp, #ALLOCSIZE, lsl #12     -> ssize += (ALLOCSIZE << 12)
+        *
+        *  mov xN, #ALLOCSIZE1
+        *  (movk xN, #ALLOCSIZE2, lsl #xx)
+        *  sub sp, sp, xN                      -> ssize += ALLOCSIZE
+        *
+        *  stp x30, x??, [sp, #-ALLOCSIZE]!    -> ssize += ALLOCSIZE, lr_off=0
+        *  stp x??, x30, [sp, #-ALLOCSIZE]!    -> ssize += ALLOCSIZE, lr_off=8
+        *  stp x??, x??, [sp, #-ALLOCSIZE]!    -> ssize += ALLOCSIZE
+        *
+        *  str x30, [sp, #-ALLOCSIZE]!         -> ssize += ALLOCSIZE, lr_off=0
+        *
+        *  stp x30, x??, [sp, #LR_OFF]         -> lr_off = LR_OFF
+        *  stp x??, x30, [sp, #LR_OFF]         -> lr_off = LR_OFF+8
+        *  str x30, [sp, #LR_OFF]              -> lr_off = LR_OFF
+        */
+
+/* #define BACKTRACE_ANALYZE_DEBUG */
+#ifdef BACKTRACE_ANALYZE_DEBUG
+#define TRACE_DEBUG(fmt, args...)      pr("BACKTRACE: " fmt, ## args)
+#else
+#define TRACE_DEBUG(args...)           __nothing
+#endif
+
+       TRACE_DEBUG("func_entry=%016lx\n", func_entry);
+       TRACE_DEBUG("        pc=%016lx (+%#lx)\n", pc, pc - func_entry);
+       TRACE_DEBUG("        sp=%016lx\n", sp);
+
+       for (pc -= 4; pc >= func_entry; pc -= 4) {
+               uint32_t insn;
+
+               db_read_bytes(pc, sizeof(insn), (char *)&insn);
+               if (insn == 0)
+                       break;
+               LE32TOH(insn);
+
+               TRACE_DEBUG("INSN: %016lx: %04x\n", pc, insn);
+
+               /* "ret", "eret", or "paciasp" to detect function entry */
+               if (func_entry_autodetect && (
+                   insn == 0xd65f03e0 ||       /* "ret" */
+                   insn == 0xd69f03e0 ||       /* "eret" */
+                   insn == 0xd503233f))        /* "paciasp" */
+                       break;
+
+               /* "sub sp,sp,#imm" or "sub sp,sp,#imm,lsl #12" */
+               if ((insn & 0xff8003ff) == 0xd10003ff) {
+                       unsigned int sh = (insn >> 22) & 1;
+                       uint64_t imm12 =
+                           ZeroExtend(12, (insn >> 10) & 0xfff, 1);
+                       if (sh)
+                               imm12 <<= 12;
+                       ssize += imm12;
+                       TRACE_DEBUG("sub sp,sp,#%lu\n", imm12);
+                       continue;
+               }
+
+               /* sub sp,sp,Xn */
+               if ((insn & 0xffe0ffff) == 0xcb2063ff) {
+                       alloc_by_Xn_reg = (insn >> 16) & 0x1f;
+                       alloc_by_Xn_kvalue = 0;
+                       alloc_by_Xn_kmask = 0;
+                       TRACE_DEBUG("sub sp,sp,x%d\n", alloc_by_Xn_reg);
+                       continue;
+               }
+               if (alloc_by_Xn_reg >= 0) {
+                       /* movk xN,#ALLOCSIZE2,lsl #xx */
+                       if ((insn & 0xff80001f) ==
+                           (0xf2800000 | alloc_by_Xn_reg)) {
+                               int hw = (insn >> 21) & 3;
+                               alloc_by_Xn_kvalue = ZeroExtend(16,
+                                   (insn >> 5) & 0xffff, 1) << (hw * 16);
+                               alloc_by_Xn_kmask = (0xffffULL << (hw * 16));
+                               TRACE_DEBUG("movk x%d,#%#lx,lsl #%d\n",
+                                   alloc_by_Xn_reg, alloc_by_Xn_kvalue,
+                                   hw * 16);
+                               continue;
+                       }
+
+                       /* (orr) mov xN,#ALLOCSIZE1 */
+                       if ((insn & 0xff8003ff) ==
+                           (0xb20003e0 | alloc_by_Xn_reg)) {
+                               uint64_t n = (insn >> 22) & 1;
+                               uint64_t immr = (insn >> 16) & 0x3f;
+                               uint64_t imms = (insn >> 10) & 0x3f;
+                               uint64_t v = DecodeBitMasks(1, n, imms, immr);
+                               TRACE_DEBUG("(orr) mov x%d,#%#lx\n",
+                                   alloc_by_Xn_reg, v);
+                               ssize += v;
+                               alloc_by_Xn_reg = -1;
+                               continue;
+                       }
+
+                       /* (movz) mov xN,#ALLOCSIZE1 */
+                       if ((insn & 0xffe0001f) ==
+                           (0xd2800000 | alloc_by_Xn_reg)) {
+                               uint64_t v =
+                                   ZeroExtend(16, (insn >> 5) & 0xffff, 1);
+                               TRACE_DEBUG("(movz) mov x%d,#%#lx\n",
+                                   alloc_by_Xn_reg, v);
+                               v &= ~alloc_by_Xn_kmask;
+                               v |= alloc_by_Xn_kvalue;
+                               ssize += v;
+                               alloc_by_Xn_reg = -1;
+                               continue;
+                       }
+                       /* (movn) mov xN,#ALLOCSIZE1 */
+                       if ((insn & 0xffe0001f) ==
+                           (0x92800000 | alloc_by_Xn_reg)) {
+                               uint64_t v =
+                                   ~ZeroExtend(16, (insn >> 5) & 0xffff, 1);
+                               TRACE_DEBUG("(movn) mov x%d,#%#lx\n",
+                                   alloc_by_Xn_reg, v);
+                               v &= ~alloc_by_Xn_kmask;
+                               v |= alloc_by_Xn_kvalue;
+                               ssize += v;
+                               alloc_by_Xn_reg = -1;
+                               continue;
+                       }
+               }
+
+               /* stp x??,x??,[sp,#-imm7]! */
+               if ((insn & 0xffe003e0) == 0xa9a003e0) {
+                       int64_t imm7 = SignExtend(7, (insn >> 15) & 0x7f, 8);
+                       uint64_t Rt2 = (insn >> 10) & 0x1f;
+                       uint64_t Rt1 = insn & 0x1f;


