Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/x86/x86 Modify the hotpatch mechanism, in order to ...



details:   https://anonhg.NetBSD.org/src/rev/d602137a3aea
branches:  trunk
changeset: 971717:d602137a3aea
user:      maxv <maxv%NetBSD.org@localhost>
date:      Sat May 02 11:37:17 2020 +0000

description:
Modify the hotpatch mechanism, in order to make it much less ROP-friendly.

Currently x86_patch_window_open is a big problem, because it is a perfect
function to inject/modify executable code with ROP.

 - Remove x86_patch_window_open(), along with its x86_patch_window_close()
   counterpart.
 - Introduce a read-only link-set of hotpatch descriptor structures,
   which reference a maximum of two read-only hotpatch sources.
 - Modify x86_hotpatch() to open a window and call the new
   x86_hotpatch_apply() function in a hard-coded manner.
 - Modify x86_hotpatch() to take a name and a selector, and have
   x86_hotpatch_apply() resolve the descriptor from the name and the
   source from the selector, before hotpatching.
 - Move the error handling in a separate x86_hotpatch_cleanup() function,
   that gets called after we closed the window.

The resulting implementation is a bit complex and non-obvious. But it
gains the following properties: the code executed in the hotpatch window
is strictly hard-coded (no callback and no possibility to execute your own
code in the window) and the pointers this code accesses are strictly
read-only (no possibility to forge pointers to hotpatch an area that was
not designated as hotpatchable at compile-time, and no possibility to
choose what bytes to write other than the maximum of two read-only
templates that were designated as valid for the given destination at
compile-time).

With current CPUs this slightly improves a situation that is already
pretty bad by definition on x86. Assuming CET however, this change closes
a big hole and is kinda great.

The only remaining problem is that dtrace-fbt tries to hotpatch random
places with random bytes, and there is just no way to make it safe.
However dtrace is only in a module that is rarely used and never compiled
into the kernel, so it's not a big problem; add a shoddy & vulnerable
independent hotpatch window in it, and leave big XXXs. It looks like fbt
is going to collapse soon anyway.

diffstat:

 external/cddl/osnet/dev/fbt/x86/fbt_isa.c |   28 ++-
 sys/arch/amd64/amd64/cpufunc.S            |   40 ++++-
 sys/arch/i386/i386/cpufunc.S              |   42 ++++-
 sys/arch/x86/include/cpufunc.h            |   17 +-
 sys/arch/x86/x86/patch.c                  |  286 ++++++++++++++++++++---------
 sys/arch/x86/x86/spectre.c                |  122 ++++++------
 sys/arch/x86/x86/svs.c                    |  132 +++++++++----
 7 files changed, 466 insertions(+), 201 deletions(-)

diffs (truncated from 909 to 300 lines):

diff -r 7b28fac6b7d2 -r d602137a3aea external/cddl/osnet/dev/fbt/x86/fbt_isa.c
--- a/external/cddl/osnet/dev/fbt/x86/fbt_isa.c Sat May 02 11:28:02 2020 +0000
+++ b/external/cddl/osnet/dev/fbt/x86/fbt_isa.c Sat May 02 11:37:17 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: fbt_isa.c,v 1.2 2019/11/13 10:13:41 maxv Exp $ */
+/*     $NetBSD: fbt_isa.c,v 1.3 2020/05/02 11:37:17 maxv Exp $ */
 
 /*
  * CDDL HEADER START
@@ -172,18 +172,40 @@
 #endif
 
 #ifdef __NetBSD__
+/*
+ * XXX XXX XXX This is absolutely unsafe, the mere existence of this code is a
+ * problem, because this function is too easily ROP-able. But this gets
+ * compiled as a module and never in the kernel, so we are fine "by default".
+ * XXX Add a #warning if it gets compiled in the kernel?
+ */
 void
 fbt_patch_tracepoint(fbt_probe_t *fbt, fbt_patchval_t val)
 {
        u_long psl, cr0;
 
-       x86_patch_window_open(&psl, &cr0);
+       /* Disable interrupts. */
+       psl = x86_read_psl();
+       x86_disable_intr();
 
+       /* Disable write protection in supervisor mode. */
+       cr0 = rcr0();
+       lcr0(cr0 & ~CR0_WP);
+
+       /* XXX XXX XXX Shouldn't rely on caller-provided dst! */
+       /* XXX XXX XXX Shouldn't rely on caller-provided val! */
        for (; fbt != NULL; fbt = fbt->fbtp_next) {
                *fbt->fbtp_patchpoint = val;
        }
 
-       x86_patch_window_close(psl, cr0);
+       /* Write back and invalidate cache, flush pipelines. */
+       wbinvd();
+       x86_flush();
+
+       /* Re-enable write protection. */
+       lcr0(cr0);
+
+       /* Restore the PSL, potentially re-enabling interrupts. */
+       x86_write_psl(psl);
 }
 #endif
 
diff -r 7b28fac6b7d2 -r d602137a3aea sys/arch/amd64/amd64/cpufunc.S
--- a/sys/arch/amd64/amd64/cpufunc.S    Sat May 02 11:28:02 2020 +0000
+++ b/sys/arch/amd64/amd64/cpufunc.S    Sat May 02 11:37:17 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpufunc.S,v 1.49 2019/11/21 19:23:58 ad Exp $  */
+/*     $NetBSD: cpufunc.S,v 1.50 2020/05/02 11:37:17 maxv Exp $        */
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -380,3 +380,41 @@
        outl    %eax, %dx
        ret
 END(outl)
+
+/*
+ * %rdi = name
+ * %rsi = sel
+ */
+ENTRY(x86_hotpatch)
+       /* save RFLAGS, and disable intrs */
+       pushfq
+       cli
+
+       /* save CR0, and disable WP */
+       movq    %cr0,%rcx
+       pushq   %rcx
+       andq    $~CR0_WP,%rcx
+       movq    %rcx,%cr0
+
+       callq   _C_LABEL(x86_hotpatch_apply)
+
+       /* write back and invalidate cache */
+       wbinvd
+
+       /* restore CR0 */
+       popq    %rcx
+       movq    %rcx,%cr0
+
+       /* flush instruction pipeline */
+       pushq   %rax
+       callq   x86_flush
+       popq    %rax
+
+       /* clean up */
+       movq    %rax,%rdi
+       callq   _C_LABEL(x86_hotpatch_cleanup)
+
+       /* restore RFLAGS */
+       popfq
+       ret
+END(x86_hotpatch)
diff -r 7b28fac6b7d2 -r d602137a3aea sys/arch/i386/i386/cpufunc.S
--- a/sys/arch/i386/i386/cpufunc.S      Sat May 02 11:28:02 2020 +0000
+++ b/sys/arch/i386/i386/cpufunc.S      Sat May 02 11:37:17 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpufunc.S,v 1.38 2019/11/21 19:24:00 ad Exp $  */
+/*     $NetBSD: cpufunc.S,v 1.39 2020/05/02 11:37:17 maxv Exp $        */
 
 /*-
  * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
 #include <sys/errno.h>
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.38 2019/11/21 19:24:00 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.39 2020/05/02 11:37:17 maxv Exp $");
 
 #include "opt_xen.h"
 
@@ -274,3 +274,41 @@
        outl    %eax, %dx
        ret
 END(outl)
+
+ENTRY(x86_hotpatch)
+       /* save EFLAGS, and disable intrs */
+       pushfl
+       cli
+
+       /* save CR0, and disable WP */
+       movl    %cr0,%ecx
+       pushl   %ecx
+       andl    $~CR0_WP,%ecx
+       movl    %ecx,%cr0
+
+       pushl   4*4(%esp) /* arg2 */
+       pushl   4*4(%esp) /* arg1 */
+       call    _C_LABEL(x86_hotpatch_apply)
+       addl    $2*4,%esp
+
+       /* write back and invalidate cache */
+       wbinvd
+
+       /* restore CR0 */
+       popl    %ecx
+       movl    %ecx,%cr0
+
+       /* flush instruction pipeline */
+       pushl   %eax
+       call    x86_flush
+       popl    %eax
+
+       /* clean up */
+       pushl   %eax
+       call    _C_LABEL(x86_hotpatch_cleanup)
+       addl    $4,%esp
+
+       /* restore EFLAGS */
+       popfl
+       ret
+END(x86_hotpatch)
diff -r 7b28fac6b7d2 -r d602137a3aea sys/arch/x86/include/cpufunc.h
--- a/sys/arch/x86/include/cpufunc.h    Sat May 02 11:28:02 2020 +0000
+++ b/sys/arch/x86/include/cpufunc.h    Sat May 02 11:37:17 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: cpufunc.h,v 1.38 2020/04/25 15:26:18 bouyer Exp $      */
+/*     $NetBSD: cpufunc.h,v 1.39 2020/05/02 11:37:17 maxv Exp $        */
 
 /*
  * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
@@ -104,9 +104,18 @@
 }
 
 #ifndef XENPV
-void   x86_hotpatch(uint32_t, const uint8_t *, size_t);
-void   x86_patch_window_open(u_long *, u_long *);
-void   x86_patch_window_close(u_long, u_long);
+struct x86_hotpatch_source {
+       uint8_t *saddr;
+       uint8_t *eaddr;
+};
+
+struct x86_hotpatch_descriptor {
+       uint8_t name;
+       uint8_t nsrc;
+       const struct x86_hotpatch_source *srcs[];
+};
+
+void   x86_hotpatch(uint8_t, uint8_t);
 void   x86_patch(bool);
 #endif
 
diff -r 7b28fac6b7d2 -r d602137a3aea sys/arch/x86/x86/patch.c
--- a/sys/arch/x86/x86/patch.c  Sat May 02 11:28:02 2020 +0000
+++ b/sys/arch/x86/x86/patch.c  Sat May 02 11:37:17 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: patch.c,v 1.46 2020/05/01 09:40:47 maxv Exp $  */
+/*     $NetBSD: patch.c,v 1.47 2020/05/02 11:37:17 maxv Exp $  */
 
 /*-
  * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.46 2020/05/01 09:40:47 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.47 2020/05/02 11:37:17 maxv Exp $");
 
 #include "opt_lockdebug.h"
 #ifdef i386
@@ -52,12 +52,137 @@
 #include <x86/cpuvar.h>
 #include <x86/cputypes.h>
 
-struct hotpatch {
+__link_set_decl(x86_hotpatch_descriptors, struct x86_hotpatch_descriptor);
+
+struct x86_hotpatch_destination {
        uint8_t name;
        uint8_t size;
        void *addr;
 } __packed;
 
+/* -------------------------------------------------------------------------- */
+
+/* CLAC instruction, part of SMAP. */
+extern uint8_t hp_clac, hp_clac_end;
+static const struct x86_hotpatch_source hp_clac_source = {
+       .saddr = &hp_clac,
+       .eaddr = &hp_clac_end
+};
+static const struct x86_hotpatch_descriptor hp_clac_desc = {
+       .name = HP_NAME_CLAC,
+       .nsrc = 1,
+       .srcs = { &hp_clac_source }
+};
+__link_set_add_rodata(x86_hotpatch_descriptors, hp_clac_desc);
+
+/* STAC instruction, part of SMAP. */
+extern uint8_t hp_stac, hp_stac_end;
+static const struct x86_hotpatch_source hp_stac_source = {
+       .saddr = &hp_stac,
+       .eaddr = &hp_stac_end
+};
+static const struct x86_hotpatch_descriptor hp_stac_desc = {
+       .name = HP_NAME_STAC,
+       .nsrc = 1,
+       .srcs = { &hp_stac_source }
+};
+__link_set_add_rodata(x86_hotpatch_descriptors, hp_stac_desc);
+
+/* Errata on certain AMD CPUs. */
+extern uint8_t hp_retfence, hp_retfence_end;
+static const struct x86_hotpatch_source hp_retfence_source = {
+       .saddr = &hp_retfence,
+       .eaddr = &hp_retfence_end
+};
+static const struct x86_hotpatch_descriptor hp_retfence_desc = {
+       .name = HP_NAME_RETFENCE,
+       .nsrc = 1,
+       .srcs = { &hp_retfence_source }
+};
+__link_set_add_rodata(x86_hotpatch_descriptors, hp_retfence_desc);
+
+/* No lock when on a single processor. */
+extern uint8_t hp_nolock, hp_nolock_end;
+static const struct x86_hotpatch_source hp_nolock_source = {
+       .saddr = &hp_nolock,
+       .eaddr = &hp_nolock_end
+};
+static const struct x86_hotpatch_descriptor hp_nolock_desc = {
+       .name = HP_NAME_NOLOCK,
+       .nsrc = 1,
+       .srcs = { &hp_nolock_source }
+};
+__link_set_add_rodata(x86_hotpatch_descriptors, hp_nolock_desc);
+
+/* Use LFENCE if available, part of SSE2. */
+extern uint8_t sse2_lfence, sse2_lfence_end;
+static const struct x86_hotpatch_source hp_sse2_lfence_source = {
+       .saddr = &sse2_lfence,
+       .eaddr = &sse2_lfence_end
+};
+static const struct x86_hotpatch_descriptor hp_sse2_lfence_desc = {
+       .name = HP_NAME_SSE2_LFENCE,
+       .nsrc = 1,
+       .srcs = { &hp_sse2_lfence_source }
+};
+__link_set_add_rodata(x86_hotpatch_descriptors, hp_sse2_lfence_desc);
+
+/* Use MFENCE if available, part of SSE2. */
+extern uint8_t sse2_mfence, sse2_mfence_end;
+static const struct x86_hotpatch_source hp_sse2_mfence_source = {
+       .saddr = &sse2_mfence,
+       .eaddr = &sse2_mfence_end
+};
+static const struct x86_hotpatch_descriptor hp_sse2_mfence_desc = {
+       .name = HP_NAME_SSE2_MFENCE,



Home | Main Index | Thread Index | Old Index