Source-Changes-HG archive


[src/netbsd-8]: src/sys Pull up following revision(s) (requested by maxv in t...



details:   https://anonhg.NetBSD.org/src/rev/cdcd629e4eba
branches:  netbsd-8
changeset: 851009:cdcd629e4eba
user:      snj <snj%NetBSD.org@localhost>
date:      Mon Sep 04 20:41:28 2017 +0000

description:
Pull up following revision(s) (requested by maxv in ticket #257):
        sys/compat/linux/arch/amd64/linux_machdep.c: 1.52
        sys/arch/amd64/amd64/copy.S: 1.21-1.24
        sys/arch/amd64/amd64/locore.S: 1.125
        sys/arch/amd64/amd64/machdep.c: 1.256
Fix a bug in ucas_32 and ucas_64: there is a branch where they return
without initializing %rax (see the C model after this description).
--
style, reduces an incoming diff
--
Split comment, otherwise it is misleading. kcopy operates on kernel
memory, and must *not* be used with userland pages.
--
Move incq outside of the copy section. No functional change, reduces
my smap diff.
--
Remove dumb debug code and outdated comment.
--
Don't forget to clean l_md.md_flags, otherwise MDL_COMPAT32 may be left
set, in which case the kernel would always use iret (slower); a sketch
of the intended effect follows the machdep.c diff below.
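
For context, the contract these routines must honor can be modeled in C
roughly as follows. This is a minimal sketch, assuming ucas_32/ucas_64
back the MI ucas_*() interface (write the previous value through the ret
pointer, return 0 on success, EFAULT for an inaccessible or kernel
address); ucas_32_model is an illustrative name and atomic_cas_32(9)
stands in for the inline lock cmpxchg:

#include <sys/param.h>
#include <sys/atomic.h>         /* atomic_cas_32(9) */
#include <sys/errno.h>          /* EFAULT */
#include <machine/vmparam.h>    /* VM_MAXUSER_ADDRESS */

int
ucas_32_model(volatile uint32_t *uaddr, uint32_t old, uint32_t new,
    uint32_t *ret)
{
	/*
	 * The buggy branch returned from here with %rax (the error
	 * code) uninitialized; the fix is to return EFAULT.
	 */
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS - sizeof(*uaddr))
		return EFAULT;

	/* lock cmpxchg: *ret receives the value previously at uaddr. */
	*ret = atomic_cas_32(uaddr, old, new);
	return 0;
}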

diffstat:

 sys/arch/amd64/amd64/copy.S                 |  82 ++++++++++++++++------------
 sys/arch/amd64/amd64/locore.S               |  10 +---
 sys/arch/amd64/amd64/machdep.c              |   6 +-
 sys/compat/linux/arch/amd64/linux_machdep.c |   6 +-
 4 files changed, 57 insertions(+), 47 deletions(-)

diffs (truncated from 334 to 300 lines):

diff -r 6c6e3a471501 -r cdcd629e4eba sys/arch/amd64/amd64/copy.S
--- a/sys/arch/amd64/amd64/copy.S       Mon Sep 04 16:11:37 2017 +0000
+++ b/sys/arch/amd64/amd64/copy.S       Mon Sep 04 20:41:28 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: copy.S,v 1.20 2015/12/09 16:55:18 maxv Exp $   */
+/*     $NetBSD: copy.S,v 1.20.10.1 2017/09/04 20:41:28 snj Exp $       */
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -44,8 +44,8 @@
 #include <machine/frameasm.h>
 
 #define GET_CURPCB(reg)        \
-       movq    CPUVAR(CURLWP), reg; \
-       movq    L_PCB(reg), reg
+       movq    CPUVAR(CURLWP),reg; \
+       movq    L_PCB(reg),reg
 
 /*
  * These are arranged so that the abnormal case is a forwards
@@ -55,10 +55,10 @@
 #define DEFERRED_SWITCH_CHECK \
        CHECK_DEFERRED_SWITCH                   ; \
        jnz     99f                             ; \
-    98:
+98:
 
 #define DEFERRED_SWITCH_CALL \
-    99:                                                ; \
+99:                                            ; \
        call    _C_LABEL(do_pmap_load)          ; \
        jmp     98b
 
@@ -85,18 +85,18 @@
        pushq   %rdx
        pushq   %rcx
        pushq   %rbx
-       movq    CPUVAR(CURLWP), %rbx
+       movq    CPUVAR(CURLWP),%rbx
 1:
        incl    L_NOPREEMPT(%rbx)
        call    _C_LABEL(pmap_load)
        decl    L_NOPREEMPT(%rbx)
        jnz     2f
-       cmpl    $0, L_DOPREEMPT(%rbx)
+       cmpl    $0,L_DOPREEMPT(%rbx)
        jz      2f
-       xorq    %rdi, %rdi
+       xorq    %rdi,%rdi
        call    _C_LABEL(kpreempt)
 2:
-       cmpl    $0, CPUVAR(WANT_PMAPLOAD)
+       cmpl    $0,CPUVAR(WANT_PMAPLOAD)
        jnz     1b
        popq    %rbx
        popq    %rcx
@@ -107,9 +107,6 @@
        ret
 
 /*
- * int kcopy(const void *from, void *to, size_t len);
- * Copy len bytes, abort on fault.
- *
  * Copy routines from and to userland, plus a few more. See the
  * section 9 manpages for info. Some cases can be optimized more.
  *
@@ -125,6 +122,10 @@
  * be ably to do cache-line size copies....
  */
 
+/*
+ * int kcopy(const void *from, void *to, size_t len);
+ * Copy len bytes from and to kernel memory, and abort on fault.
+ */
 ENTRY(kcopy)
        xchgq   %rdi,%rsi
        movq    %rdx,%rcx
@@ -194,6 +195,7 @@
        rep
        movsb                           /* copy remaining bytes */
 .Lcopyout_end:
+
        xorl    %eax,%eax
        ret
        DEFERRED_SWITCH_CALL
@@ -221,6 +223,7 @@
        rep
        movsb
 .Lcopyin_end:
+
        xorl    %eax,%eax
        ret
        DEFERRED_SWITCH_CALL
@@ -256,9 +259,9 @@
        jae     1f
        movq    %rax,%rdx
        movq    %rax,%r8
-.Lcopyoutstr_start:
 1:     incq    %rdx
 
+.Lcopyoutstr_start:
 1:     decq    %rdx
        jz      2f
        lodsb
@@ -266,6 +269,7 @@
        testb   %al,%al
        jnz     1b
 .Lcopyoutstr_end:
+
        /* Success -- 0 byte reached. */
        decq    %rdx
        xorq    %rax,%rax
@@ -295,9 +299,9 @@
        jae     1f
        movq    %rax,%rdx
        movq    %rax,%r8
-.Lcopyinstr_start:
 1:     incq    %rdx
 
+.Lcopyinstr_start:
 1:     decq    %rdx
        jz      2f
        lodsb
@@ -363,7 +367,7 @@
 
 
 ENTRY(fuswintr)
-       cmpl    $TLBSTATE_VALID, CPUVAR(TLBSTATE)
+       cmpl    $TLBSTATE_VALID,CPUVAR(TLBSTATE)
        jnz     _C_LABEL(fusuaddrfault)
        movq    $VM_MAXUSER_ADDRESS-2,%r11
        cmpq    %r11,%rdi
@@ -371,7 +375,9 @@
        GET_CURPCB(%rcx)
        leaq    _C_LABEL(fusuintrfailure)(%rip),%r11
        movq    %r11,PCB_ONFAULT(%rcx)
+
        movzwl  (%rdi),%eax
+
        movq    $0,PCB_ONFAULT(%rcx)
        ret
 
@@ -383,13 +389,15 @@
        GET_CURPCB(%rcx)
        leaq    _C_LABEL(fusufailure)(%rip),%r11
        movq    %r11,PCB_ONFAULT(%rcx)
+
        movzbl  (%rdi),%eax
+
        movq    $0,PCB_ONFAULT(%rcx)
        ret
        DEFERRED_SWITCH_CALL
 
 ENTRY(suswintr)
-       cmpl    $TLBSTATE_VALID, CPUVAR(TLBSTATE)
+       cmpl    $TLBSTATE_VALID,CPUVAR(TLBSTATE)
        jnz     _C_LABEL(fusuaddrfault)
        movq    $VM_MAXUSER_ADDRESS-2,%r11
        cmpq    %r11,%rdi
@@ -397,7 +405,9 @@
        GET_CURPCB(%rcx)
        leaq    _C_LABEL(fusuintrfailure)(%rip),%r11
        movq    %r11,PCB_ONFAULT(%rcx)
+
        movw    %si,(%rdi)
+
        xorq    %rax,%rax
        movq    %rax,PCB_ONFAULT(%rcx)
        ret
@@ -413,6 +423,7 @@
        movq    %r11,PCB_ONFAULT(%rcx)
 
        movb    %sil,(%rdi)
+
        xorq    %rax,%rax
        movq    %rax,PCB_ONFAULT(%rcx)
        ret
@@ -444,21 +455,23 @@
 ENTRY(ucas_64)
        DEFERRED_SWITCH_CHECK
        /* Fail if kernel-space */
-       movq    $VM_MAXUSER_ADDRESS-8, %r8
-       cmpq    %r8, %rdi
-       ja      _C_LABEL(ucas_fault)
-       movq    %rsi, %rax
+       movq    $VM_MAXUSER_ADDRESS-8,%r8
+       cmpq    %r8,%rdi
+       ja      _C_LABEL(ucas_efault)
+       movq    %rsi,%rax
+
 .Lucas64_start:
        /* Perform the CAS */
        lock
-       cmpxchgq %rdx, (%rdi)
+       cmpxchgq %rdx,(%rdi)
 .Lucas64_end:
+
        /*
         * Note: %rax is "old" value.
         * Set the return values.
         */
-       movq    %rax, (%rcx)
-       xorq    %rax, %rax
+       movq    %rax,(%rcx)
+       xorq    %rax,%rax
        ret
        DEFERRED_SWITCH_CALL
 
@@ -468,28 +481,29 @@
 ENTRY(ucas_32)
        DEFERRED_SWITCH_CHECK
        /* Fail if kernel-space */
-       movq    $VM_MAXUSER_ADDRESS-4, %r8
-       cmpq    %r8, %rdi
-       ja      _C_LABEL(ucas_fault)
-       movl    %esi, %eax
+       movq    $VM_MAXUSER_ADDRESS-4,%r8
+       cmpq    %r8,%rdi
+       ja      _C_LABEL(ucas_efault)
+       movl    %esi,%eax
+
 .Lucas32_start:
        /* Perform the CAS */
        lock
-       cmpxchgl %edx, (%rdi)
+       cmpxchgl %edx,(%rdi)
 .Lucas32_end:
+
        /*
         * Note: %eax is "old" value.
         * Set the return values.
         */
-       movl    %eax, (%rcx)
-       xorq    %rax, %rax
+       movl    %eax,(%rcx)
+       xorq    %rax,%rax
        ret
        DEFERRED_SWITCH_CALL
 
-/*
- * Fault handler for ucas_*().
- * Just return the error set by trap().
- */
+ENTRY(ucas_efault)
+       movq    $EFAULT,%rax
+
 NENTRY(ucas_fault)
        ret
 
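The fusu* and ucas_* routines above share one recovery pattern: point
PCB_ONFAULT at a failure label, perform the single user-memory access,
then clear PCB_ONFAULT; if the access faults, trap() resumes at the
label instead of panicking. Below is a hedged userland analogue of that
pattern, with a SIGSEGV handler and sigsetjmp standing in for trap()
and PCB_ONFAULT (fetch_byte and segv are illustrative names, and
dereferencing address 1 is assumed to fault, as on typical systems):

#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

static sigjmp_buf onfault;              /* stands in for PCB_ONFAULT */

static void
segv(int sig)
{
	siglongjmp(onfault, 1);         /* like trap() jumping to fusufailure */
}

/* Fetch one byte; return EFAULT instead of crashing on a bad address. */
static int
fetch_byte(const volatile uint8_t *addr, uint8_t *valp)
{
	if (sigsetjmp(onfault, 1) != 0)
		return EFAULT;          /* the access below faulted */
	*valp = *addr;                  /* the single access that may fault */
	return 0;
}

int
main(void)
{
	struct sigaction sa;
	uint8_t ok = 42, val;
	int error;

	sa.sa_handler = segv;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGSEGV, &sa, NULL);

	error = fetch_byte(&ok, &val);
	printf("good address: error=%d val=%u\n", error, val);
	error = fetch_byte((const volatile uint8_t *)1, &val);
	printf("bad address:  error=%d\n", error);
	return 0;
}
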
diff -r 6c6e3a471501 -r cdcd629e4eba sys/arch/amd64/amd64/locore.S
--- a/sys/arch/amd64/amd64/locore.S     Mon Sep 04 16:11:37 2017 +0000
+++ b/sys/arch/amd64/amd64/locore.S     Mon Sep 04 20:41:28 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: locore.S,v 1.123.6.1 2017/07/05 19:59:29 snj Exp $     */
+/*     $NetBSD: locore.S,v 1.123.6.2 2017/09/04 20:41:28 snj Exp $     */
 
 /*
  * Copyright-o-rama!
@@ -1082,12 +1082,6 @@
 
        /* Switch to newlwp's stack. */
        movq    L_PCB(%r12),%r14
-#ifdef XEN /* XXX debug code */
-       cmpq    $0,PCB_RSP(%r14)
-       jne 999f
-       callq _C_LABEL(cpu_Debugger);
-999:
-#endif
        movq    PCB_RSP(%r14),%rsp
        movq    PCB_RBP(%r14),%rbp
 
@@ -1236,8 +1230,6 @@
  * syscall()
  *
  * syscall insn entry.
- * This currently isn't much faster, but it can be made faster in the future.
- * (Actually we've already saved a few 100 clocks by not loading the trap gate)
  */
 IDTVEC(syscall)
 #ifndef XEN
diff -r 6c6e3a471501 -r cdcd629e4eba sys/arch/amd64/amd64/machdep.c
--- a/sys/arch/amd64/amd64/machdep.c    Mon Sep 04 16:11:37 2017 +0000
+++ b/sys/arch/amd64/amd64/machdep.c    Mon Sep 04 20:41:28 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: machdep.c,v 1.255 2017/03/24 17:09:36 maxv Exp $       */
+/*     $NetBSD: machdep.c,v 1.255.6.1 2017/09/04 20:41:28 snj Exp $    */
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -111,7 +111,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.255 2017/03/24 17:09:36 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.255.6.1 2017/09/04 20:41:28 snj Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -1325,6 +1325,8 @@
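
The machdep.c hunk itself falls past the truncation point, but per the
description it is part of the l_md.md_flags cleanup. A rough sketch of
the intended effect, with the exact statement assumed rather than quoted
from the diff (clear_compat32_flag is an illustrative helper, not
kernel code):

#include <sys/lwp.h>            /* struct lwp */
#include <machine/proc.h>       /* struct mdlwp, MDL_COMPAT32 */

/*
 * Assumed shape of the fix: on exec, drop any stale 32-bit compat
 * flag, so a native process does not keep taking the slower iret
 * syscall return path reserved for MDL_COMPAT32 lwps.
 */
static void
clear_compat32_flag(struct lwp *l)
{
	l->l_md.md_flags &= ~MDL_COMPAT32;
}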
 


