Source-Changes-HG archive


[src/netbsd-8]: src/sys/arch/i386 Pull up following revision(s) (requested by...



details:   https://anonhg.NetBSD.org/src/rev/4c20ebb480d1
branches:  netbsd-8
changeset: 434598:4c20ebb480d1
user:      martin <martin@NetBSD.org>
date:      Sun Feb 04 12:40:31 2018 +0000

description:
Pull up following revision(s) (requested by maxv in ticket #519):
        sys/arch/i386/conf/largepages.inc: file removal
        sys/arch/i386/conf/kern.ldscript.4MB: file removal
        sys/arch/i386/i386/copy.S: revision 1.26
Sync with amd64, in particular, add END() markers, don't fall through
functions, narrow the copy windows, and remove suword.
Remove these files. No one cares about this on i386, and there is no
point in keeping undocumented options nobody understands anyway.
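
As an illustration of the END() markers and the narrowed copy windows mentioned
above, here is a condensed reading of the new copyout from the copy.S hunk
further down, with explanatory comments added. It is a sketch for orientation
only, not a buildable file: ENTRY, END and _C_LABEL come from <machine/asm.h>,
DEFERRED_SWITCH_CHECK/CALL are defined earlier in copy.S, and describing the
.Lcopyout_start/.Lcopyout_end labels as the bounds of the fault-recovery
("copy") window is an interpretation of the surrounding code, not something
shown in the truncated diff.

    ENTRY(copyout)
            DEFERRED_SWITCH_CHECK
            pushl   %esi
            pushl   %edi
            movl    12(%esp),%esi           /* from (kernel address) */
            movl    16(%esp),%edi           /* to (user address) */
            movl    20(%esp),%eax           /* len */

            movl    %edi,%edx
            addl    %eax,%edx               /* edx = to + len */
            jc      _C_LABEL(copy_efault)   /* wrapped around: EFAULT */
            cmpl    $VM_MAXUSER_ADDRESS,%edx
            ja      _C_LABEL(copy_efault)   /* past user space: EFAULT */

    .Lcopyout_start:                        /* copy window starts here... */
            movl    %eax,%ecx
            shrl    $2,%ecx
            rep
            movsl                           /* copy 4-byte words */
            andl    $3,%eax
            jz      .Lcopyout_end
            movl    %eax,%ecx
            rep
            movsb                           /* copy the remaining bytes */
    .Lcopyout_end:                          /* ...and ends right after the copy */

            popl    %edi
            popl    %esi
            xorl    %eax,%eax               /* success: return 0 */
            ret
            DEFERRED_SWITCH_CALL
    END(copyout)                            /* END() marks the end of the symbol */

With the argument checks hoisted above .Lcopyout_start, a fault taken while
validating the destination range is no longer attributed to the copy itself,
which appears to be what "narrow the copy windows" refers to; copyin is
restructured the same way in the hunks below.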

diffstat:

 sys/arch/i386/conf/kern.ldscript.4MB |   88 -------------
 sys/arch/i386/conf/largepages.inc    |    7 -
 sys/arch/i386/i386/copy.S            |  222 +++++++++++++++++-----------------
 3 files changed, 111 insertions(+), 206 deletions(-)

diffs (truncated from 755 to 300 lines):

diff -r d6d6469c8d39 -r 4c20ebb480d1 sys/arch/i386/conf/kern.ldscript.4MB
--- a/sys/arch/i386/conf/kern.ldscript.4MB      Sun Feb 04 12:10:48 2018 +0000
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-/*     $NetBSD: kern.ldscript.4MB,v 1.17 2016/05/16 07:52:31 maxv Exp $        */
-
-#include "assym.h"
-
-/*
- * The large page size is 4MB in the non-PAE case.
- */
-
-__PAGE_SIZE = 0x1000 ;
-__LARGE_PAGE_SIZE = 0x400000 ;
-
-ENTRY(_start)
-SECTIONS
-{
-       .text :
-       {
-               *(.text)
-               *(.text.*)
-               *(.stub)
-       }
-       _etext = . ;
-       PROVIDE (etext = .) ;
-
-       /*
-        * Push the rodata segment up to the next large page boundary so that we
-        * can map the text segment with large pages.
-        */
-       . = ALIGN(__LARGE_PAGE_SIZE);
-
-       __rodata_start = . ;
-       .rodata :
-       {
-               *(.rodata)
-               *(.rodata.*)
-       }
-
-       . = ALIGN(__PAGE_SIZE);
-
-       __data_start = . ;
-       .data :
-       {
-               *(.data)
-       }
-
-       . = ALIGN(COHERENCY_UNIT);
-       .data.cacheline_aligned :
-       {
-               *(.data.cacheline_aligned)
-       }
-       . = ALIGN(COHERENCY_UNIT);
-       .data.read_mostly :
-       {
-               *(.data.read_mostly)
-       }
-       . = ALIGN(COHERENCY_UNIT);
-
-       _edata = . ;
-       PROVIDE (edata = .) ;
-       __bss_start = . ;
-       .bss :
-       {
-               *(.bss)
-               *(.bss.*)
-               *(COMMON)
-               . = ALIGN(32 / 8);
-       }
-
-       . = ALIGN(__PAGE_SIZE);
-
-       /* End of the kernel image */
-       __kernel_end = . ;
-
-       _end = . ;
-       PROVIDE (end = .) ;
-       .note.netbsd.ident :
-       {
-               KEEP(*(.note.netbsd.ident));
-       }
-}
-
-SECTIONS
-{
-       .text :
-       AT (ADDR(.text) & 0x0fffffff)
-       {
-               *(.text)
-       } = 0
-}
diff -r d6d6469c8d39 -r 4c20ebb480d1 sys/arch/i386/conf/largepages.inc
--- a/sys/arch/i386/conf/largepages.inc Sun Feb 04 12:10:48 2018 +0000
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-#      $NetBSD: largepages.inc,v 1.4 2015/08/21 02:02:00 uebayasi Exp $
-#
-# Options to create a kernel suitable for mapping with large
-# pages.
-#
-
-makeoptions    KERNLDSCRIPT="kern.ldscript.4MB"
diff -r d6d6469c8d39 -r 4c20ebb480d1 sys/arch/i386/i386/copy.S
--- a/sys/arch/i386/i386/copy.S Sun Feb 04 12:10:48 2018 +0000
+++ b/sys/arch/i386/i386/copy.S Sun Feb 04 12:40:31 2018 +0000
@@ -1,7 +1,6 @@
-/*     $NetBSD: copy.S,v 1.25 2016/09/16 12:28:41 maxv Exp $   */
-/*     NetBSD: locore.S,v 1.34 2005/04/01 11:59:31 yamt Exp $  */
+/*     $NetBSD: copy.S,v 1.25.8.1 2018/02/04 12:40:31 martin Exp $     */
 
-/*-
+/*
  * Copyright (c) 1998, 2000, 2004, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
@@ -30,7 +29,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*-
+/*
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
  *
@@ -65,7 +64,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: copy.S,v 1.25 2016/09/16 12:28:41 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: copy.S,v 1.25.8.1 2018/02/04 12:40:31 martin Exp $");
 
 #include "assym.h"
 
@@ -75,8 +74,8 @@
 #include <machine/cputypes.h>
 
 #define GET_CURPCB(reg)        \
-       movl    CPUVAR(CURLWP), reg; \
-       movl    L_PCB(reg), reg
+       movl    CPUVAR(CURLWP),reg; \
+       movl    L_PCB(reg),reg
 
 /*
  * These are arranged so that the abnormal case is a forwards
@@ -86,10 +85,10 @@
 #define DEFERRED_SWITCH_CHECK \
        CHECK_DEFERRED_SWITCH                   ; \
        jnz     99f                             ; \
-       98:
+98:
 
 #define DEFERRED_SWITCH_CALL \
-       99:                                             ; \
+99:                                            ; \
        call    _C_LABEL(do_pmap_load)          ; \
        jmp     98b
 
@@ -109,25 +108,26 @@
  */
 NENTRY(do_pmap_load)
        pushl   %ebp
-       movl    %esp, %ebp
+       movl    %esp,%ebp
        pushl   %ebx
-       movl    CPUVAR(CURLWP), %ebx
+       movl    CPUVAR(CURLWP),%ebx
 1:
        incl    L_NOPREEMPT(%ebx)
        call    _C_LABEL(pmap_load)
        decl    L_NOPREEMPT(%ebx)
        jnz     2f
-       cmpl    $0, L_DOPREEMPT(%ebx)
+       cmpl    $0,L_DOPREEMPT(%ebx)
        jz      2f
        pushl   $0
        call    _C_LABEL(kpreempt)
-       addl    $4, %esp
+       addl    $4,%esp
 2:
-       cmpl    $0, CPUVAR(WANT_PMAPLOAD)
+       cmpl    $0,CPUVAR(WANT_PMAPLOAD)
        jnz     1b
        popl    %ebx
        leave
        ret
+END(do_pmap_load)
 
 /*
  * void *return_address(unsigned int level);
@@ -151,12 +151,12 @@
        movl    0x4(%eax),%eax
        movl    $0,PCB_ONFAULT(%edx)
        ret
+END(return_address)
 
 /*
  * int kcopy(const void *from, void *to, size_t len);
- * Copy len bytes, abort on fault.
+ * Copy len bytes from and to kernel memory, and abort on fault.
  */
-/* LINTSTUB: Func: int kcopy(const void *from, void *to, size_t len) */
 ENTRY(kcopy)
        pushl   %esi
        pushl   %edi
@@ -206,6 +206,7 @@
        popl    %esi
        xorl    %eax,%eax
        ret
+END(kcopy)
 
 /*****************************************************************************/
 
@@ -219,110 +220,107 @@
  * Copy len bytes into the user's address space.
  * see copyout(9)
  */
-
-/* LINTSTUB: Func: int copyout(const void *kaddr, void *uaddr, size_t len) */
 ENTRY(copyout)
        DEFERRED_SWITCH_CHECK
        pushl   %esi
        pushl   %edi
-       movl    12(%esp),%esi
-       movl    16(%esp),%edi
-       movl    20(%esp),%eax
-.Lcopyout_start:
-       /*
-        * We check that the end of the destination buffer is not past the end
-        * of the user's address space.
-        */
+       movl    12(%esp),%esi   /* from */
+       movl    16(%esp),%edi   /* to */
+       movl    20(%esp),%eax   /* len */
+
        movl    %edi,%edx
        addl    %eax,%edx
        jc      _C_LABEL(copy_efault)
        cmpl    $VM_MAXUSER_ADDRESS,%edx
        ja      _C_LABEL(copy_efault)
+
+.Lcopyout_start:
        movl    %eax,%ecx
        shrl    $2,%ecx
        rep
        movsl
        andl    $3,%eax
-       jz      1f
+       jz      .Lcopyout_end
        movl    %eax,%ecx
        rep
        movsb
-1:
 .Lcopyout_end:
+
        popl    %edi
        popl    %esi
        xorl    %eax,%eax
        ret
        DEFERRED_SWITCH_CALL
+END(copyout)
 
 /*
  * int copyin(const void *from, void *to, size_t len);
  * Copy len bytes from the user's address space.
  * see copyin(9)
  */
-
-/* LINTSTUB: Func: int copyin(const void *uaddr, void *kaddr, size_t len) */
 ENTRY(copyin)
        DEFERRED_SWITCH_CHECK
        pushl   %esi
        pushl   %edi
-       movl    12(%esp),%esi
-       movl    16(%esp),%edi
-       movl    20(%esp),%eax
-       /*
-        * We check that the end of the source buffer is not past the end of
-        * the user's address space.  If it's not, then we only need to check
-        * that each page is readable, and the CPU will do that for us.
-        */
-.Lcopyin_start:
+       movl    12(%esp),%esi   /* from */
+       movl    16(%esp),%edi   /* to */
+       movl    20(%esp),%eax   /* len */
+
        movl    %esi,%edx
        addl    %eax,%edx
        jc      _C_LABEL(copy_efault)
        cmpl    $VM_MAXUSER_ADDRESS,%edx
        ja      _C_LABEL(copy_efault)
+
+.Lcopyin_start:
        movl    %eax,%ecx
        shrl    $2,%ecx
        rep
        movsl
        andl    $3,%eax
-       jz      1f
+       jz      .Lcopyin_end


