Source-Changes-HG archive


[src/trunk]: src/sys/arch Use absolute jumps, and drop the PC-relative patching



details:   https://anonhg.NetBSD.org/src/rev/8bdfccd9af67
branches:  trunk
changeset: 932161:8bdfccd9af67
user:      maxv <maxv@NetBSD.org>
date:      Fri May 01 09:17:58 2020 +0000

description:
Use absolute jumps, and drop the PC-relative patching. We want exact
templates.
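
A brief illustration of the point (a hedged sketch, not NetBSD code; the
addresses, the jmp_rel32() helper and its values are made up): an E9
"jmp rel32" encodes its target relative to the next instruction, so copying
the template bytes to a different address changes where the jump lands unless
the displacement is re-patched, whereas "movl $Xspllower,%eax; jmp *%eax"
embeds the absolute address and the copied bytes stay valid, and identical,
anywhere. That is what makes the templates "exact".

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * rel32 operand of an E9 "jmp rel32": target minus the address of the
 * *next* instruction (the jmp itself is 5 bytes long).
 */
static uint32_t
jmp_rel32(uint32_t insn_addr, uint32_t target)
{
	return target - (insn_addr + 5);
}

int
main(void)
{
	uint32_t target = 0xc0100000;	/* stand-in for Xspllower */

	/* Same logical jump, assembled in the template vs. in the copy. */
	printf("disp in template: 0x%08" PRIx32 "\n",
	    jmp_rel32(0xc0200000, target));
	printf("disp in the copy: 0x%08" PRIx32 "\n",
	    jmp_rel32(0xc0300000, target));

	/*
	 * The two displacements differ, so a memcpy'd template ending in a
	 * PC-relative jmp had to be fixed up after the copy.  With an
	 * absolute operand the bytes are correct wherever they land.
	 */
	return 0;
}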

diffstat:

 sys/arch/i386/i386/lock_stubs.S |  16 ++++++++++------
 sys/arch/i386/i386/spl.S        |  13 +++++++------
 sys/arch/x86/x86/patch.c        |  36 +++++-------------------------------
 3 files changed, 22 insertions(+), 43 deletions(-)

diffs (165 lines):

diff -r 223b77a6492b -r 8bdfccd9af67 sys/arch/i386/i386/lock_stubs.S
--- a/sys/arch/i386/i386/lock_stubs.S   Fri May 01 08:45:01 2020 +0000
+++ b/sys/arch/i386/i386/lock_stubs.S   Fri May 01 09:17:58 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: lock_stubs.S,v 1.33 2020/04/25 15:26:17 bouyer Exp $   */
+/*     $NetBSD: lock_stubs.S,v 1.34 2020/05/01 09:17:58 maxv Exp $     */
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: lock_stubs.S,v 1.33 2020/04/25 15:26:17 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lock_stubs.S,v 1.34 2020/05/01 09:17:58 maxv Exp $");
 
 #include "opt_lockdebug.h"
 
@@ -271,13 +271,14 @@
        movl    %ecx, CPUVAR(ILEVEL)
        sti
 1:     ret
-       .space 32
+       .space  32, 0xCC
        .align  32
 LABEL(mutex_spin_exit_end)
 END(mutex_spin_exit)
 #else  /* XENPV */
 STRONG_ALIAS(mutex_spin_exit, i686_mutex_spin_exit)
 #endif /* !XENPV */
+
 /*
  * Patch for i686 CPUs where cli/sti is prohibitively expensive.
  * Must be the same size as mutex_spin_exit().
@@ -302,9 +303,12 @@
 2:
        popl    %ebx
        movl    %ecx,4(%esp)
-LABEL(i686_mutex_spin_exit_patch)
-       jmp     _C_LABEL(Xspllower)
-       .space 16
+
+       /* The reference must be absolute, hence the indirect jump. */
+       movl    $Xspllower,%eax
+       jmp     *%eax
+
+       .space  16, 0xCC
        .align  32
 LABEL(i686_mutex_spin_exit_end)
 END(i686_mutex_spin_exit)
diff -r 223b77a6492b -r 8bdfccd9af67 sys/arch/i386/i386/spl.S
--- a/sys/arch/i386/i386/spl.S  Fri May 01 08:45:01 2020 +0000
+++ b/sys/arch/i386/i386/spl.S  Fri May 01 09:17:58 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: spl.S,v 1.51 2020/04/25 15:26:17 bouyer Exp $  */
+/*     $NetBSD: spl.S,v 1.52 2020/05/01 09:17:58 maxv Exp $    */
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.51 2020/04/25 15:26:17 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.52 2020/05/01 09:17:58 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_spldebug.h"
@@ -147,10 +147,11 @@
 2:
        popl    %ebx
 
-       .type   _C_LABEL(cx8_spllower_patch), @function
-LABEL(cx8_spllower_patch)
-       jmp     _C_LABEL(Xspllower)
-       .align  32
+       /* The reference must be absolute, hence the indirect jump. */
+       movl    $Xspllower,%eax
+       jmp     *%eax
+
+       .align  32, 0xCC
 LABEL(cx8_spllower_end)
 END(cx8_spllower)
 
diff -r 223b77a6492b -r 8bdfccd9af67 sys/arch/x86/x86/patch.c
--- a/sys/arch/x86/x86/patch.c  Fri May 01 08:45:01 2020 +0000
+++ b/sys/arch/x86/x86/patch.c  Fri May 01 09:17:58 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: patch.c,v 1.44 2020/05/01 08:32:50 maxv Exp $  */
+/*     $NetBSD: patch.c,v 1.45 2020/05/01 09:17:58 maxv Exp $  */
 
 /*-
  * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.44 2020/05/01 08:32:50 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.45 2020/05/01 09:17:58 maxv Exp $");
 
 #include "opt_lockdebug.h"
 #ifdef i386
@@ -62,35 +62,13 @@
 void   spllower_end(void);
 void   cx8_spllower(int);
 void   cx8_spllower_end(void);
-void   cx8_spllower_patch(void);
 
 void   mutex_spin_exit_end(void);
 void   i686_mutex_spin_exit(int);
 void   i686_mutex_spin_exit_end(void);
-void   i686_mutex_spin_exit_patch(void);
-
-#define        X86_CS          0x2e
-#define        X86_DS          0x3e
-#define        X86_GROUP_0F    0x0f
-
-static void
-adjust_jumpoff(uint8_t *ptr, void *from_s, void *to_s)
-{
-
-       /* Branch hints */
-       if (ptr[0] == X86_CS || ptr[0] == X86_DS)
-               ptr++;
-       /* Conditional jumps */
-       if (ptr[0] == X86_GROUP_0F)
-               ptr++;          
-       /* 4-byte relative jump or call */
-       *(uint32_t *)(ptr + 1 - (uintptr_t)from_s + (uintptr_t)to_s) +=
-           ((uint32_t)(uintptr_t)from_s - (uint32_t)(uintptr_t)to_s);
-}
 
 static void __unused
-patchfunc(void *from_s, void *from_e, void *to_s, void *to_e,
-         void *pcrel)
+patchfunc(void *from_s, void *from_e, void *to_s, void *to_e)
 {
 
        if ((uintptr_t)from_e - (uintptr_t)from_s !=
@@ -98,8 +76,6 @@
                panic("patchfunc: sizes do not match (from=%p)", from_s);
 
        memcpy(to_s, from_s, (uintptr_t)to_e - (uintptr_t)to_s);
-       if (pcrel != NULL)
-               adjust_jumpoff(pcrel, from_s, to_s);
 }
 
 static inline void __unused
@@ -233,14 +209,12 @@
                /* Faster splx(), mutex_spin_exit(). */
                patchfunc(
                    cx8_spllower, cx8_spllower_end,
-                   spllower, spllower_end,
-                   cx8_spllower_patch
+                   spllower, spllower_end
                );
 #if !defined(LOCKDEBUG)
                patchfunc(
                    i686_mutex_spin_exit, i686_mutex_spin_exit_end,
-                   mutex_spin_exit, mutex_spin_exit_end,
-                   i686_mutex_spin_exit_patch
+                   mutex_spin_exit, mutex_spin_exit_end
                );
 #endif /* !LOCKDEBUG */
        }
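
For context on what was removed: adjust_jumpoff() skipped optional CS/DS
branch-hint prefixes (0x2e/0x3e) and the 0x0f byte of two-byte conditional
jumps, then rebased the copied 4-byte displacement by adding
(from_s - to_s). The sketch below (illustrative addresses only, not NetBSD
code) checks that rebasing identity; with the absolute jumps above the step
is unnecessary, and patchfunc() reduces to a size check plus memcpy.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Illustrative 32-bit addresses only. */
	uint32_t target    = 0xc0100000;	/* jump destination */
	uint32_t from_next = 0xc0200010;	/* end of the jmp in the template */
	uint32_t to_next   = 0xc0300010;	/* end of the jmp in the copy */

	uint32_t disp_from = target - from_next;	/* as assembled */
	uint32_t disp_to   = target - to_next;		/* what the copy needs */

	/*
	 * adjust_jumpoff() added (from_s - to_s) to the copied displacement;
	 * the copy preserves offsets, so from_next - to_next is the same
	 * quantity, and the identity below is exactly that fix-up.
	 */
	assert(disp_to == disp_from + (from_next - to_next));
	return 0;
}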


