Source-Changes-HG archive


[src/trunk]: src/sys/arch/amd64/amd64 KNF, and use C-style comments. Also, remove fusword/susword.



details:   https://anonhg.NetBSD.org/src/rev/e4a0406da44c
branches:  trunk
changeset: 342153:e4a0406da44c
user:      maxv <maxv%NetBSD.org@localhost>
date:      Wed Dec 09 16:55:18 2015 +0000

description:
KNF, and use C-style comments. Also, remove fusword/susword.

diffstat:

 sys/arch/amd64/amd64/copy.S    |  86 ++++++++++++++---------------------------
 sys/arch/amd64/amd64/mptramp.S |  24 +++++-----
 2 files changed, 42 insertions(+), 68 deletions(-)

diffs (235 lines):

diff -r 055f8e6e3436 -r e4a0406da44c sys/arch/amd64/amd64/copy.S
--- a/sys/arch/amd64/amd64/copy.S       Wed Dec 09 16:26:16 2015 +0000
+++ b/sys/arch/amd64/amd64/copy.S       Wed Dec 09 16:55:18 2015 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: copy.S,v 1.19 2015/11/21 12:34:48 maxv Exp $   */
+/*     $NetBSD: copy.S,v 1.20 2015/12/09 16:55:18 maxv Exp $   */
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -119,7 +119,7 @@
  * Also note that the setup time for 'rep movs' is horrid - especially on P4
  * netburst - but on my AMD X2 it manages one copy (read+write) per clock
  * which can be achieved with a code loop, but is probably impossible to beat.
- * Howver the use of 'rep movsb' for the final bytes should be killed.
+ * However the use of 'rep movsb' for the final bytes should be killed.
  *
  * Newer Intel cpus have a much lower setup time, and may (someday)
 * be able to do cache-line size copies....
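
As a point of reference for the comment above: the forward path is 64-bit
words via 'rep movsq' followed by the 0-7 leftover bytes via 'rep movsb'.
A minimal C sketch of that computation (word_copy_fwd is a made-up name,
not NetBSD code; memcpy() per word sidesteps alignment questions):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
word_copy_fwd(void *dst, const void *src, size_t len)
{
        uint8_t *d = dst;
        const uint8_t *s = src;
        size_t n;

        for (n = len >> 3; n > 0; n--) {        /* shrq $3,%rcx; rep movsq */
                memcpy(d, s, 8);
                d += 8;
                s += 8;
        }
        for (n = len & 7; n > 0; n--)           /* andl $7,%ecx; rep movsb */
                *d++ = *s++;
}

The 'rep movsb' the comment wants killed is the second loop: for a 0-7
byte tail the string instruction's setup cost dominates, and a plain byte
loop or an overlapping 8-byte store is generally cheaper.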
@@ -131,35 +131,37 @@
 .Lkcopy_start:
        movq    %rdi,%rax
        subq    %rsi,%rax
-       cmpq    %rcx,%rax               # overlapping?
+       cmpq    %rcx,%rax               /* overlapping? */
        jb      1f
-       # nope, copy forward
-       shrq    $3,%rcx                 # copy by 64-bit words
+       /* nope, copy forward */
+       shrq    $3,%rcx                 /* copy by 64-bit words */
        rep
        movsq
 
        movq    %rdx,%rcx
-       andl    $7,%ecx                 # any bytes left?
+       andl    $7,%ecx                 /* any bytes left? */
        rep
        movsb
 
        xorq    %rax,%rax
        ret
 
-# Using 'rep movs' to copy backwards is not as fast as for forwards copies
-# and ought not be done when the copy doesn't acually overlap.
-# However kcopy() isn't used any that looks even vaguely used often.
-# I'm also not sure it is ever asked to do overlapping copies!
+/*
+ * Using 'rep movs' to copy backwards is not as fast as for forwards copies
+ * and ought not be done when the copy doesn't actually overlap.
+ * However kcopy() isn't used anywhere that looks even vaguely performance-critical.
+ * I'm also not sure it is ever asked to do overlapping copies!
+ */
 
-1:     addq    %rcx,%rdi               # copy backward
+1:     addq    %rcx,%rdi               /* copy backward */
        addq    %rcx,%rsi
        std
-       andq    $7,%rcx                 # any fractional bytes?
+       andq    $7,%rcx                 /* any fractional bytes? */
        decq    %rdi
        decq    %rsi
        rep
        movsb
-       movq    %rdx,%rcx               # copy remainder by 64-bit words
+       movq    %rdx,%rcx               /* copy remainder by 64-bit words */
        shrq    $3,%rcx
        subq    $7,%rsi
        subq    $7,%rdi
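
The forward-or-backward decision at the top of kcopy is a single unsigned
comparison: if (dst - src) mod 2^64 is at least the length, a forward copy
can never overwrite source bytes it has yet to read (this also covers
dst < src, where the subtraction wraps to a huge value). A minimal sketch
under that reading; kcopy_sketch is hypothetical, and the real kcopy()
additionally arms pcb_onfault so a fault returns EFAULT instead of
panicking:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
kcopy_sketch(void *dst, const void *src, size_t len)
{
        uintptr_t delta = (uintptr_t)dst - (uintptr_t)src;

        if (delta >= len)               /* cmpq %rcx,%rax; jb 1f */
                memcpy(dst, src, len);  /* disjoint: copy forward */
        else
                memmove(dst, src, len); /* overlap: copy backward */
}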
@@ -173,24 +175,24 @@
 ENTRY(copyout)
        DEFERRED_SWITCH_CHECK
 
-       xchgq   %rdi,%rsi               # kernel address to %rsi, user to %rdi
-       movq    %rdx,%rax               # save transfer length (bytes)
+       xchgq   %rdi,%rsi               /* kernel address to %rsi, user to %rdi */
+       movq    %rdx,%rax               /* save transfer length (bytes) */
 
-       addq    %rdi,%rdx               # end address to %rdx
-       jc      _C_LABEL(copy_efault)   # jump if wraps
+       addq    %rdi,%rdx               /* end address to %rdx */
+       jc      _C_LABEL(copy_efault)   /* jump if wraps */
        movq    $VM_MAXUSER_ADDRESS,%r8
        cmpq    %r8,%rdx
-       ja      _C_LABEL(copy_efault)   # jump if end in kernel space
+       ja      _C_LABEL(copy_efault)   /* jump if end in kernel space */
 
 .Lcopyout_start:
-       movq    %rax,%rcx               # length
-       shrq    $3,%rcx                 # count of 8-byte words
+       movq    %rax,%rcx               /* length */
+       shrq    $3,%rcx                 /* count of 8-byte words */
        rep
-       movsq                           # copy from %rsi to %rdi
+       movsq                           /* copy from %rsi to %rdi */
        movb    %al,%cl
-       andb    $7,%cl                  # remaining number of bytes
+       andb    $7,%cl                  /* remaining number of bytes */
        rep
-       movsb                           # copy remaining bytes
+       movsb                           /* copy remaining bytes */
 .Lcopyout_end:
        xorl    %eax,%eax
        ret
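
Both guards before .Lcopyout_start fit in one C predicate: the carry from
the addition catches address wrap, and the comparison with
VM_MAXUSER_ADDRESS rejects ranges that end in kernel space. A sketch for a
standalone build; the #define below is an illustrative placeholder, the
kernel gets the real constant from <machine/vmparam.h>:

#include <stddef.h>
#include <stdint.h>

#ifndef VM_MAXUSER_ADDRESS
#define VM_MAXUSER_ADDRESS 0x00007f8000000000UL /* placeholder value */
#endif

/* copyout's range check; copyin applies the same test to its source. */
static int
user_range_ok(uintptr_t uaddr, size_t len)
{
        uintptr_t end = uaddr + len;

        if (end < uaddr)                /* wrapped: jc copy_efault */
                return 0;
        if (end > VM_MAXUSER_ADDRESS)   /* ends in kernel space */
                return 0;
        return 1;
}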
@@ -202,11 +204,11 @@
        xchgq   %rdi,%rsi
        movq    %rdx,%rax
 
-       addq    %rsi,%rdx               # Check source address not wrapped
+       addq    %rsi,%rdx               /* check source address not wrapped */
        jc      _C_LABEL(copy_efault)
-       movq    $VM_MAXUSER_ADDRESS,%r8 
+       movq    $VM_MAXUSER_ADDRESS,%r8
        cmpq    %r8,%rdx
-       ja      _C_LABEL(copy_efault)   # j if end in kernel space
+       ja      _C_LABEL(copy_efault)   /* j if end in kernel space */
 
 .Lcopyin_start:
 3:     /* bcopy(%rsi, %rdi, %rax); */
@@ -359,19 +361,7 @@
 
 7:     ret
 
-ENTRY(fusword)
-       DEFERRED_SWITCH_CHECK
-       movq    $VM_MAXUSER_ADDRESS-2,%r11
-       cmpq    %r11,%rdi
-       ja      _C_LABEL(fusuaddrfault)
-       GET_CURPCB(%rcx)
-       leaq    _C_LABEL(fusufailure)(%rip),%r11
-       movq    %r11,PCB_ONFAULT(%rcx)
-       movzwl  (%rdi),%eax
-       movq    $0,PCB_ONFAULT(%rcx)
-       ret
-       DEFERRED_SWITCH_CALL
-       
+
 ENTRY(fuswintr)
        cmpl    $TLBSTATE_VALID, CPUVAR(TLBSTATE)
        jnz     _C_LABEL(fusuaddrfault)
@@ -384,7 +374,7 @@
        movzwl  (%rdi),%eax
        movq    $0,PCB_ONFAULT(%rcx)
        ret
-       
+
 ENTRY(fubyte)
        DEFERRED_SWITCH_CHECK
        movq    $VM_MAXUSER_ADDRESS-1,%r11
@@ -398,22 +388,6 @@
        ret
        DEFERRED_SWITCH_CALL
 
-ENTRY(susword)
-       DEFERRED_SWITCH_CHECK
-       movq    $VM_MAXUSER_ADDRESS-2,%r11
-       cmpq    %r11,%rdi
-       ja      _C_LABEL(fusuaddrfault)
-
-       GET_CURPCB(%rcx)
-       leaq    _C_LABEL(fusufailure)(%rip),%r11
-       movq    %r11,PCB_ONFAULT(%rcx)
-
-       movw    %si,(%rdi)
-       xorq    %rax,%rax
-       movq    %rax,PCB_ONFAULT(%rcx)
-       ret
-       DEFERRED_SWITCH_CALL
-
 ENTRY(suswintr)
        cmpl    $TLBSTATE_VALID, CPUVAR(TLBSTATE)
        jnz     _C_LABEL(fusuaddrfault)
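
The removed fusword/susword were the 16-bit fetch/store-user-word
primitives: read or write one halfword of user memory, with pcb_onfault
catching the page fault so a bad pointer yields -1 rather than a crash.
Their semantics can be recovered from the surviving copyin(9)/copyout(9)
API; a kernel-context sketch with hypothetical names, not the deleted
assembly:

#include <sys/types.h>
#include <sys/systm.h>

/* fusword(): fetch a user halfword, -1 on fault. */
static int
fusword_equiv(const void *uaddr)
{
        uint16_t val;

        if (copyin(uaddr, &val, sizeof(val)) != 0)
                return -1;              /* bad address or fault */
        return val;
}

/* susword(): store a user halfword, 0 on success, -1 on fault. */
static int
susword_equiv(void *uaddr, unsigned short val)
{
        uint16_t v = val;

        return (copyout(&v, uaddr, sizeof(v)) == 0) ? 0 : -1;
}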
diff -r 055f8e6e3436 -r e4a0406da44c sys/arch/amd64/amd64/mptramp.S
--- a/sys/arch/amd64/amd64/mptramp.S    Wed Dec 09 16:26:16 2015 +0000
+++ b/sys/arch/amd64/amd64/mptramp.S    Wed Dec 09 16:55:18 2015 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: mptramp.S,v 1.18 2015/11/22 13:41:24 maxv Exp $        */
+/*     $NetBSD: mptramp.S,v 1.19 2015/12/09 16:55:18 maxv Exp $        */
 
 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -125,19 +125,19 @@
        movw    %ax,%es
        movw    %ax,%ss
 #ifdef __clang__
-       lgdt (mptramp_gdt32_desc)       # load flat descriptor table
+       lgdt (mptramp_gdt32_desc)       /* load flat descriptor table */
 #else
-       data32 addr32 lgdt (mptramp_gdt32_desc)   # load flat descriptor table
+       data32 addr32 lgdt (mptramp_gdt32_desc)   /* load flat descriptor table */
 #endif
-       movl    %cr0, %eax      # get cr0
-       orl     $0x1, %eax      # enable protected mode
-       movl    %eax, %cr0      # doit
+       movl    %cr0, %eax      /* get cr0 */
+       orl     $0x1, %eax      /* enable protected mode */
+       movl    %eax, %cr0      /* doit */
        ljmpl   $0x8, $mp_startup
 
 _TRMP_LABEL(mp_startup)
        .code32
 
-       movl    $0x10, %eax     # data segment
+       movl    $0x10, %eax     /* data segment */
        movw    %ax, %ds
        movw    %ax, %ss
        movw    %ax, %es
@@ -171,16 +171,16 @@
        orl     $(EFER_LME|EFER_SCE),%eax
        wrmsr
 
-       movl    RELOC(mp_pdirpa),%ecx   # guaranteed < 4G
-       movl    %ecx,%cr3               # load ptd addr into mmu
+       movl    RELOC(mp_pdirpa),%ecx   /* guaranteed < 4G */
+       movl    %ecx,%cr3               /* load ptd addr into mmu */
 
-       movl    %cr0,%eax               # get control word
+       movl    %cr0,%eax               /* get control word */
        orl     $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM),%eax
        movl    %eax,%cr0
        jmp     mptramp_compat
 mptramp_compat:
 
-       movl    $GSEL(GDATA_SEL, SEL_KPL),%eax  #switch to new segment
+       movl    $GSEL(GDATA_SEL, SEL_KPL),%eax  /* switch to new segment */
        movl    %eax,%ds
        movl    %eax,%es
        movl    %eax,%ss
@@ -217,7 +217,7 @@
        jmp     *%rax
 
 
-_C_LABEL(cpu_spinup_trampoline_end):   #end of code copied to MP_TRAMPOLINE
+_C_LABEL(cpu_spinup_trampoline_end):   /* end of code copied to MP_TRAMPOLINE */
        /*
         * If EFER_NXE is not enabled, fetching a page with a NX bit set
         * will raise a #GP. Avoid that by setting the NXE feature now.
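
The mptramp.S hunks are comment-style only, but the code they annotate is
the standard bring-up: load a flat GDT, set CR0.PE and far-jump into
protected mode, enable EFER.LME (plus EFER.SCE for syscall/sysret), point
CR3 at the page tables, then set CR0.PG to activate long mode. The bits
are architectural; this standalone sketch merely assembles the two values
the trampoline writes (macro names mirror the kernel's, the numeric
positions are from the Intel/AMD manuals):

#include <stdio.h>

#define CR0_PE  0x00000001u     /* protected mode enable */
#define CR0_MP  0x00000002u     /* monitor coprocessor */
#define CR0_TS  0x00000008u     /* task switched */
#define CR0_NE  0x00000020u     /* native x87 error reporting */
#define CR0_WP  0x00010000u     /* ring-0 write protect */
#define CR0_AM  0x00040000u     /* alignment-check mask */
#define CR0_PG  0x80000000u     /* paging enable */

#define EFER_SCE 0x00000001u    /* syscall/sysret enable */
#define EFER_LME 0x00000100u    /* long mode enable */

int
main(void)
{
        unsigned int cr0 = CR0_PE | CR0_PG | CR0_NE | CR0_TS |
            CR0_MP | CR0_WP | CR0_AM;
        unsigned int efer = EFER_LME | EFER_SCE;

        printf("CR0  = %#010x\n", cr0);         /* 0x8005002b */
        printf("EFER |= %#010x\n", efer);       /* 0x00000101 */
        return 0;
}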


