Source-Changes-HG archive


[src/trunk]: src/sys/arch Add kASan support for aarch64. Stack tracking needs...



details:   https://anonhg.NetBSD.org/src/rev/31f94bda65ce
branches:  trunk
changeset: 445542:31f94bda65ce
user:      maxv <maxv%NetBSD.org@localhost>
date:      Thu Nov 01 20:34:49 2018 +0000

description:
Add kASan support for aarch64. Stack tracking needs more investigation
and will come in a separate commit.

Reviewed by ryo@ jmcneill@ skrll@.
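
For readers skimming the patch: the core of the new sys/arch/aarch64/include/asan.h is a
linear virtual-address-to-shadow translation. Below is an illustrative, standalone sketch
(not part of the commit) of that arithmetic. MD_VIRTUAL_SHIFT and MD_CANONICAL_BASE are
taken from the diff; the 1:8 scale (KASAN_SHADOW_SCALE_SHIFT = 3) is inferred from the
CTASSERT that the shadow spans 64 L0 slots (2^(48-3) / 2^39 = 64, assuming 512GB L0
entries); the KASAN_MD_SHADOW_START value is a made-up stand-in for AARCH64_KSEG_END,
which the real header uses.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the __MD_* / KASAN_MD_* macros added in asan.h. */
    #define KASAN_SHADOW_SCALE_SHIFT 3   /* 1 shadow byte tracks 8 bytes of kernel memory */
    #define MD_VIRTUAL_SHIFT         48  /* kernel half of the address space, as in the patch */
    #define MD_CANONICAL_BASE        0xFFFF000000000000ULL
    #define MD_SHADOW_SIZE           (1ULL << (MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
    #define KASAN_MD_SHADOW_START    0xFFFF800000000000ULL  /* stand-in for AARCH64_KSEG_END */

    /* Same arithmetic as kasan_md_addr_to_shad() in the new asan.h. */
    static uint64_t
    addr_to_shad(uint64_t va)
    {
            return KASAN_MD_SHADOW_START +
                ((va - MD_CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT);
    }

    int
    main(void)
    {
            uint64_t va = 0xFFFF000040000000ULL;    /* an arbitrary kernel VA */

            /* 2^45 bytes of shadow, i.e. 1/8 of the 2^48-byte kernel VA space. */
            printf("shadow size = 0x%" PRIx64 " bytes\n", (uint64_t)MD_SHADOW_SIZE);
            printf("va 0x%" PRIx64 " -> shadow byte at 0x%" PRIx64 "\n",
                va, addr_to_shad(va));
            return 0;
    }

The rest of the patch supplies what this translation needs at runtime: pmap_enter() and
kasan_md_init() call kasan_shadow_map() so shadow pages exist before they are dereferenced,
and kasan_md_shadow_map_page() builds the L0-L3 page-table entries backing them.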

diffstat:

 sys/arch/aarch64/aarch64/aarch64_machdep.c |   10 +-
 sys/arch/aarch64/aarch64/pmap.c            |   12 ++-
 sys/arch/aarch64/conf/Makefile.aarch64     |   10 +-
 sys/arch/aarch64/conf/kern.ldscript        |    5 +
 sys/arch/aarch64/include/asan.h            |  136 +++++++++++++++++++++++++++++
 sys/arch/aarch64/include/pmap.h            |    9 +-
 sys/arch/evbarm/conf/GENERIC64             |    6 +-
 sys/arch/evbarm/include/asan.h             |    5 +
 8 files changed, 185 insertions(+), 8 deletions(-)

diffs (truncated from 325 to 300 lines):

diff -r 8075d6ae1b0d -r 31f94bda65ce sys/arch/aarch64/aarch64/aarch64_machdep.c
--- a/sys/arch/aarch64/aarch64/aarch64_machdep.c        Thu Nov 01 19:11:31 2018 +0000
+++ b/sys/arch/aarch64/aarch64/aarch64_machdep.c        Thu Nov 01 20:34:49 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: aarch64_machdep.c,v 1.17 2018/10/31 13:42:24 jmcneill Exp $ */
+/* $NetBSD: aarch64_machdep.c,v 1.18 2018/11/01 20:34:49 maxv Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,10 +30,11 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.17 2018/10/31 13:42:24 jmcneill Exp $");
+__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.18 2018/11/01 20:34:49 maxv Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
+#include "opt_kasan.h"
 #include "opt_kernhist.h"
 #include "opt_modular.h"
 #include "opt_fdt.h"
@@ -46,6 +47,7 @@
 #include <sys/msgbuf.h>
 #include <sys/sysctl.h>
 #include <sys/reboot.h>
+#include <sys/asan.h>
 
 #include <dev/mm.h>
 
@@ -358,6 +360,10 @@
         */
        pmap_bootstrap(kernelvmstart, VM_MAX_KERNEL_ADDRESS);
 
+#ifdef KASAN
+       kasan_init();
+#endif
+
        /*
         * setup lwp0
         */
diff -r 8075d6ae1b0d -r 31f94bda65ce sys/arch/aarch64/aarch64/pmap.c
--- a/sys/arch/aarch64/aarch64/pmap.c   Thu Nov 01 19:11:31 2018 +0000
+++ b/sys/arch/aarch64/aarch64/pmap.c   Thu Nov 01 20:34:49 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.32 2018/10/31 06:36:19 ryo Exp $    */
+/*     $NetBSD: pmap.c,v 1.33 2018/11/01 20:34:49 maxv Exp $   */
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,10 +27,11 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.32 2018/10/31 06:36:19 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.33 2018/11/01 20:34:49 maxv Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
+#include "opt_kasan.h"
 #include "opt_multiprocessor.h"
 #include "opt_pmap.h"
 #include "opt_uvmhist.h"
@@ -40,6 +41,7 @@
 #include <sys/kmem.h>
 #include <sys/vmem.h>
 #include <sys/atomic.h>
+#include <sys/asan.h>
 
 #include <uvm/uvm.h>
 
@@ -1407,6 +1409,12 @@
 #endif
        need_sync_icache = (prot & VM_PROT_EXECUTE);
 
+#ifdef KASAN
+       if (!user) {
+               kasan_shadow_map((void *)va, PAGE_SIZE);
+       }
+#endif
+
        if (l3pte_valid(pte)) {
                KASSERT(!kenter);       /* pmap_kenter_pa() cannot override */
 
diff -r 8075d6ae1b0d -r 31f94bda65ce sys/arch/aarch64/conf/Makefile.aarch64
--- a/sys/arch/aarch64/conf/Makefile.aarch64    Thu Nov 01 19:11:31 2018 +0000
+++ b/sys/arch/aarch64/conf/Makefile.aarch64    Thu Nov 01 20:34:49 2018 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: Makefile.aarch64,v 1.12 2018/09/22 12:24:01 rin Exp $
+#      $NetBSD: Makefile.aarch64,v 1.13 2018/11/01 20:34:49 maxv Exp $
 
 # Makefile for NetBSD
 #
@@ -39,6 +39,14 @@
 CFLAGS+=       -mno-omit-leaf-frame-pointer
 #CFLAGS+=      -mno-unaligned-access
 
+.if ${KASAN:U0} > 0 && ${HAVE_GCC:U0} > 0
+KASANFLAGS=    -fsanitize=kernel-address \
+               --param asan-globals=1
+.for f in subr_asan.c
+KASANFLAGS.${f}=       # empty
+.endfor
+CFLAGS+=       ${KASANFLAGS.${.IMPSRC:T}:U${KASANFLAGS}}
+.endif
 
 ##
 ## (3) libkern and compat
diff -r 8075d6ae1b0d -r 31f94bda65ce sys/arch/aarch64/conf/kern.ldscript
--- a/sys/arch/aarch64/conf/kern.ldscript       Thu Nov 01 19:11:31 2018 +0000
+++ b/sys/arch/aarch64/conf/kern.ldscript       Thu Nov 01 20:34:49 2018 +0000
@@ -24,6 +24,11 @@
        {
                *(.rodata)
                *(.rodata.*)
+               . = ALIGN(64);
+               __CTOR_LIST__ = .;
+               *(.ctors)
+               *(.init_array)
+               __CTOR_END__ = .;
        }
 
        PROVIDE(_etext = .);
diff -r 8075d6ae1b0d -r 31f94bda65ce sys/arch/aarch64/include/asan.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/aarch64/include/asan.h   Thu Nov 01 20:34:49 2018 +0000
@@ -0,0 +1,136 @@
+/*     $NetBSD: asan.h,v 1.1 2018/11/01 20:34:50 maxv Exp $    */
+
+/*
+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/atomic.h>
+#include <aarch64/pmap.h>
+#include <aarch64/vmparam.h>
+#include <aarch64/cpufunc.h>
+#include <aarch64/armreg.h>
+
+#define __MD_VIRTUAL_SHIFT     48      /* 49bit address space, cut half */
+#define __MD_CANONICAL_BASE    0xFFFF000000000000
+
+#define __MD_SHADOW_SIZE       (1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_MD_SHADOW_START  (AARCH64_KSEG_END)
+#define KASAN_MD_SHADOW_END    (KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
+
+static inline int8_t *
+kasan_md_addr_to_shad(const void *addr)
+{
+       vaddr_t va = (vaddr_t)addr;
+       return (int8_t *)(KASAN_MD_SHADOW_START +
+           ((va - __MD_CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
+}
+
+static inline bool
+kasan_md_unsupported(vaddr_t addr)
+{
+       return (addr < VM_MIN_KERNEL_ADDRESS) ||
+           (addr >= VM_KERNEL_IO_ADDRESS);
+}
+
+static paddr_t
+__md_palloc(void)
+{
+       paddr_t pa;
+
+       pmap_alloc_pdp(pmap_kernel(), &pa);
+
+       return pa;
+}
+
+static void
+kasan_md_shadow_map_page(vaddr_t va)
+{
+       pd_entry_t *l0, *l1, *l2, *l3;
+       paddr_t l0pa, pa;
+       pd_entry_t pde;
+       size_t idx;
+
+       l0pa = reg_ttbr1_el1_read();
+       l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
+
+       idx = l0pde_index(va);
+       pde = l0[idx];
+       if (!l0pde_valid(pde)) {
+               pa = __md_palloc();
+               atomic_swap_64(&l0[idx], pa | L0_TABLE);
+       } else {
+               pa = l0pde_pa(pde);
+       }
+       l1 = (void *)AARCH64_PA_TO_KVA(pa);
+
+       idx = l1pde_index(va);
+       pde = l1[idx];
+       if (!l1pde_valid(pde)) {
+               pa = __md_palloc();
+               atomic_swap_64(&l1[idx], pa | L1_TABLE);
+       } else {
+               pa = l1pde_pa(pde);
+       }
+       l2 = (void *)AARCH64_PA_TO_KVA(pa);
+
+       idx = l2pde_index(va);
+       pde = l2[idx];
+       if (!l2pde_valid(pde)) {
+               pa = __md_palloc();
+               atomic_swap_64(&l2[idx], pa | L2_TABLE);
+       } else {
+               pa = l2pde_pa(pde);
+       }
+       l3 = (void *)AARCH64_PA_TO_KVA(pa);
+
+       idx = l3pte_index(va);
+       pde = l3[idx];
+       if (!l3pte_valid(pde)) {
+               pa = __md_palloc();
+               atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
+                   LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_AP_RW);
+               aarch64_tlbi_by_va(va);
+       }
+}
+
+#define kasan_md_early_init(a) __nothing
+
+static void
+kasan_md_init(void)
+{
+       vaddr_t eva, dummy;
+
+       CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);
+
+       /* The VAs we've created until now. */
+       pmap_virtual_space(&eva, &dummy);
+       kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
+           eva - VM_MIN_KERNEL_ADDRESS);
+}
+
+#define kasan_md_unwind()      __nothing
diff -r 8075d6ae1b0d -r 31f94bda65ce sys/arch/aarch64/include/pmap.h
--- a/sys/arch/aarch64/include/pmap.h   Thu Nov 01 19:11:31 2018 +0000
+++ b/sys/arch/aarch64/include/pmap.h   Thu Nov 01 20:34:49 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.16 2018/10/18 09:01:51 skrll Exp $ */
+/* $NetBSD: pmap.h,v 1.17 2018/11/01 20:34:50 maxv Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -35,6 +35,10 @@
 #ifdef __aarch64__
 
 #ifdef _KERNEL
+#ifdef _KERNEL_OPT
+#include "opt_kasan.h"
+#endif
+
 #include <sys/types.h>
 #include <sys/pool.h>
 #include <sys/queue.h>
@@ -47,9 +51,10 @@
 
 #define __HAVE_VM_PAGE_MD
 
+#ifndef KASAN
 #define PMAP_MAP_POOLPAGE(pa)          AARCH64_PA_TO_KVA(pa)
 #define PMAP_UNMAP_POOLPAGE(va)                AARCH64_KVA_TO_PA(va)
-
+#endif
 
 struct pmap {
        kmutex_t pm_lock;
diff -r 8075d6ae1b0d -r 31f94bda65ce sys/arch/evbarm/conf/GENERIC64
--- a/sys/arch/evbarm/conf/GENERIC64    Thu Nov 01 19:11:31 2018 +0000
+++ b/sys/arch/evbarm/conf/GENERIC64    Thu Nov 01 20:34:49 2018 +0000
@@ -1,5 +1,5 @@
 #


