Source-Changes-HG archive


[src/netbsd-6]: src Pull up following revision(s) (requested by christos in t...



details:   https://anonhg.NetBSD.org/src/rev/032474cdcfea
branches:  netbsd-6
changeset: 775456:032474cdcfea
user:      riz <riz%NetBSD.org@localhost>
date:      Mon Nov 19 18:38:01 2012 +0000

description:
Pull up following revision(s) (requested by christos in ticket #654):
        common/lib/libc/arch/arm/gen/modsi3.S: revision 1.1
        common/lib/libc/arch/arm/gen/umodsi3.S: revision 1.1
        sys/lib/libkern/arch/arm/Makefile.inc: revision 1.11
        sys/lib/libkern/arch/arm/Makefile.inc: revision 1.12
        common/lib/libc/arch/arm/gen/divide.S: revision 1.1
        lib/libc/arch/arm/gen/Makefile.inc: revision 1.20
        common/lib/libc/arch/arm/gen/divsi3.S: revision 1.3
        common/lib/libc/arch/arm/gen/udivsi3.S: revision 1.1
        common/lib/libc/arch/arm/gen/divsi3.S: revision 1.4
Split out modsi3 and umodsi3 from the divsi3 file, so that static linking
does not end up with the same division-support symbols defined in both
libc and libgcc.
Example: cc -pthread -static main-calls-pthread-create.c
(an illustrative reproducer sketch follows below)
Add the split-out files and the new divsi3-related files, i.e. the new
files needed to fix static linking.
Split udivsi3 and divsi3 to fix static linking. This could be done better.
Fixes PR#47139
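
For context, the failure mode being fixed is a multiple-definition link
error: before the split, the divsi3 object in libc also carried the modulo
helpers, so a static link that pulls in both libc and libgcc could end up
seeing the same division-support symbols twice. A minimal reproducer along
the lines of the example above might look like this (the contents are
illustrative only; the file name comes from the commit message, everything
else is an assumption about a pre-v7 ARM target where / and % compile to
calls into the __divsi3/__modsi3 helper routines):

/* main-calls-pthread-create.c -- illustrative reproducer only. */
#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	int n = *(int *)arg;

	/* Integer divide and modulo force references to the helpers. */
	printf("%d / 7 = %d, %d %% 7 = %d\n", n, n / 7, n, n % 7);
	return NULL;
}

int
main(void)
{
	pthread_t t;
	int n = 1234;

	if (pthread_create(&t, NULL, worker, &n) != 0)
		return 1;
	pthread_join(t, NULL);
	return 0;
}

Building it statically as in the example (cc -pthread -static
main-calls-pthread-create.c) is the scenario where the duplicated symbols
previously collided; with modsi3 and umodsi3 in their own source files,
the static linker only pulls in the objects it actually needs.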

diffstat:

 common/lib/libc/arch/arm/gen/divide.S  |  370 +++++++++++++++++++++++++++++++++
 common/lib/libc/arch/arm/gen/divsi3.S  |   18 +-
 common/lib/libc/arch/arm/gen/modsi3.S  |   31 ++
 common/lib/libc/arch/arm/gen/udivsi3.S |   23 ++
 common/lib/libc/arch/arm/gen/umodsi3.S |   30 ++
 lib/libc/arch/arm/gen/Makefile.inc     |    4 +-
 sys/lib/libkern/arch/arm/Makefile.inc  |    4 +-
 7 files changed, 459 insertions(+), 21 deletions(-)

diffs (truncated from 527 to 300 lines):

diff -r 88c0de66c731 -r 032474cdcfea common/lib/libc/arch/arm/gen/divide.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/common/lib/libc/arch/arm/gen/divide.S     Mon Nov 19 18:38:01 2012 +0000
@@ -0,0 +1,370 @@
+/*     $NetBSD: divide.S,v 1.1.2.2 2012/11/19 18:38:02 riz Exp $       */
+
+/*
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/* 
+ * stack is aligned as there's a possibility of branching to .L_overflow
+ * which makes a C call
+ */
+
+.L_overflow:
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+       mov     r0, #8                  /* SIGFPE */
+       bl      PIC_SYM(_C_LABEL(raise), PLT)   /* raise it */
+       mov     r0, #0
+#else
+       /* XXX should cause a fatal error */
+       mvn     r0, #0
+#endif
+       RET
+
+       .globl  __udivide
+__udivide:                             /* r0 = r0 / r1; r1 = r0 % r1 */
+       eor     r0, r1, r0 
+       eor     r1, r0, r1 
+       eor     r0, r1, r0 
+                                       /* r0 = r1 / r0; r1 = r1 % r0 */
+       cmp     r0, #1
+       bcc     .L_overflow
+       beq     .L_divide_l0
+       mov     ip, #0
+       movs    r1, r1
+       bpl     .L_divide_l1
+       orr     ip, ip, #0x20000000     /* ip bit 0x20000000 = -ve r1 */
+       movs    r1, r1, lsr #1
+       orrcs   ip, ip, #0x10000000     /* ip bit 0x10000000 = bit 0 of r1 */
+       b       .L_divide_l1
+
+.L_divide_l0:                          /* r0 == 1 */
+       mov     r0, r1
+       mov     r1, #0
+       RET
+
+       .globl  __divide
+__divide:                              /* r0 = r0 / r1; r1 = r0 % r1 */
+       eor     r0, r1, r0 
+       eor     r1, r0, r1 
+       eor     r0, r1, r0 
+                                       /* r0 = r1 / r0; r1 = r1 % r0 */
+       cmp     r0, #1
+       bcc     .L_overflow
+       beq     .L_divide_l0
+       ands    ip, r0, #0x80000000
+       rsbmi   r0, r0, #0
+       ands    r2, r1, #0x80000000
+       eor     ip, ip, r2
+       rsbmi   r1, r1, #0
+       orr     ip, r2, ip, lsr #1      /* ip bit 0x40000000 = -ve division */
+                                       /* ip bit 0x80000000 = -ve remainder */
+
+.L_divide_l1:
+       mov     r2, #1
+       mov     r3, #0
+
+       /*
+        * If the highest bit of the dividend is set, we have to be
+        * careful when shifting the divisor. Test this. 
+        */
+       movs    r1,r1
+       bpl     .L_old_code
+
+       /*
+        * At this point, the highest bit of r1 is known to be set.
+        * We abuse this below in the tst instructions.
+        */
+       tst     r1, r0 /*, lsl #0 */
+       bmi     .L_divide_b1
+       tst     r1, r0, lsl #1
+       bmi     .L_divide_b2
+       tst     r1, r0, lsl #2
+       bmi     .L_divide_b3
+       tst     r1, r0, lsl #3
+       bmi     .L_divide_b4
+       tst     r1, r0, lsl #4
+       bmi     .L_divide_b5
+       tst     r1, r0, lsl #5
+       bmi     .L_divide_b6
+       tst     r1, r0, lsl #6
+       bmi     .L_divide_b7
+       tst     r1, r0, lsl #7
+       bmi     .L_divide_b8
+       tst     r1, r0, lsl #8
+       bmi     .L_divide_b9
+       tst     r1, r0, lsl #9
+       bmi     .L_divide_b10
+       tst     r1, r0, lsl #10
+       bmi     .L_divide_b11
+       tst     r1, r0, lsl #11
+       bmi     .L_divide_b12
+       tst     r1, r0, lsl #12
+       bmi     .L_divide_b13
+       tst     r1, r0, lsl #13
+       bmi     .L_divide_b14
+       tst     r1, r0, lsl #14
+       bmi     .L_divide_b15
+       tst     r1, r0, lsl #15
+       bmi     .L_divide_b16
+       tst     r1, r0, lsl #16
+       bmi     .L_divide_b17
+       tst     r1, r0, lsl #17
+       bmi     .L_divide_b18
+       tst     r1, r0, lsl #18
+       bmi     .L_divide_b19
+       tst     r1, r0, lsl #19
+       bmi     .L_divide_b20
+       tst     r1, r0, lsl #20
+       bmi     .L_divide_b21
+       tst     r1, r0, lsl #21
+       bmi     .L_divide_b22
+       tst     r1, r0, lsl #22
+       bmi     .L_divide_b23
+       tst     r1, r0, lsl #23
+       bmi     .L_divide_b24
+       tst     r1, r0, lsl #24
+       bmi     .L_divide_b25
+       tst     r1, r0, lsl #25
+       bmi     .L_divide_b26
+       tst     r1, r0, lsl #26
+       bmi     .L_divide_b27
+       tst     r1, r0, lsl #27
+       bmi     .L_divide_b28
+       tst     r1, r0, lsl #28
+       bmi     .L_divide_b29
+       tst     r1, r0, lsl #29
+       bmi     .L_divide_b30
+       tst     r1, r0, lsl #30
+       bmi     .L_divide_b31
+/*
+ * instead of:
+ *     tst     r1, r0, lsl #31
+ *     bmi     .L_divide_b32
+ */
+       b       .L_divide_b32
+
+.L_old_code:
+       cmp     r1, r0
+       bcc     .L_divide_b0
+       cmp     r1, r0, lsl #1
+       bcc     .L_divide_b1
+       cmp     r1, r0, lsl #2
+       bcc     .L_divide_b2
+       cmp     r1, r0, lsl #3
+       bcc     .L_divide_b3
+       cmp     r1, r0, lsl #4
+       bcc     .L_divide_b4
+       cmp     r1, r0, lsl #5
+       bcc     .L_divide_b5
+       cmp     r1, r0, lsl #6
+       bcc     .L_divide_b6
+       cmp     r1, r0, lsl #7
+       bcc     .L_divide_b7
+       cmp     r1, r0, lsl #8
+       bcc     .L_divide_b8
+       cmp     r1, r0, lsl #9
+       bcc     .L_divide_b9
+       cmp     r1, r0, lsl #10
+       bcc     .L_divide_b10
+       cmp     r1, r0, lsl #11
+       bcc     .L_divide_b11
+       cmp     r1, r0, lsl #12
+       bcc     .L_divide_b12
+       cmp     r1, r0, lsl #13
+       bcc     .L_divide_b13
+       cmp     r1, r0, lsl #14
+       bcc     .L_divide_b14
+       cmp     r1, r0, lsl #15
+       bcc     .L_divide_b15
+       cmp     r1, r0, lsl #16
+       bcc     .L_divide_b16
+       cmp     r1, r0, lsl #17
+       bcc     .L_divide_b17
+       cmp     r1, r0, lsl #18
+       bcc     .L_divide_b18
+       cmp     r1, r0, lsl #19
+       bcc     .L_divide_b19
+       cmp     r1, r0, lsl #20
+       bcc     .L_divide_b20
+       cmp     r1, r0, lsl #21
+       bcc     .L_divide_b21
+       cmp     r1, r0, lsl #22
+       bcc     .L_divide_b22
+       cmp     r1, r0, lsl #23
+       bcc     .L_divide_b23
+       cmp     r1, r0, lsl #24
+       bcc     .L_divide_b24
+       cmp     r1, r0, lsl #25
+       bcc     .L_divide_b25
+       cmp     r1, r0, lsl #26
+       bcc     .L_divide_b26
+       cmp     r1, r0, lsl #27
+       bcc     .L_divide_b27
+       cmp     r1, r0, lsl #28
+       bcc     .L_divide_b28
+       cmp     r1, r0, lsl #29
+       bcc     .L_divide_b29
+       cmp     r1, r0, lsl #30
+       bcc     .L_divide_b30
+.L_divide_b32:
+       cmp     r1, r0, lsl #31
+       subhs   r1, r1,r0, lsl #31
+       addhs   r3, r3,r2, lsl #31
+.L_divide_b31:
+       cmp     r1, r0, lsl #30
+       subhs   r1, r1,r0, lsl #30
+       addhs   r3, r3,r2, lsl #30
+.L_divide_b30:
+       cmp     r1, r0, lsl #29
+       subhs   r1, r1,r0, lsl #29
+       addhs   r3, r3,r2, lsl #29
+.L_divide_b29:
+       cmp     r1, r0, lsl #28
+       subhs   r1, r1,r0, lsl #28
+       addhs   r3, r3,r2, lsl #28
+.L_divide_b28:
+       cmp     r1, r0, lsl #27
+       subhs   r1, r1,r0, lsl #27
+       addhs   r3, r3,r2, lsl #27
+.L_divide_b27:
+       cmp     r1, r0, lsl #26
+       subhs   r1, r1,r0, lsl #26
+       addhs   r3, r3,r2, lsl #26
+.L_divide_b26:
+       cmp     r1, r0, lsl #25
+       subhs   r1, r1,r0, lsl #25
+       addhs   r3, r3,r2, lsl #25
+.L_divide_b25:
+       cmp     r1, r0, lsl #24
+       subhs   r1, r1,r0, lsl #24
+       addhs   r3, r3,r2, lsl #24
+.L_divide_b24:
+       cmp     r1, r0, lsl #23
+       subhs   r1, r1,r0, lsl #23
+       addhs   r3, r3,r2, lsl #23
+.L_divide_b23:
+       cmp     r1, r0, lsl #22
+       subhs   r1, r1,r0, lsl #22
+       addhs   r3, r3,r2, lsl #22
+.L_divide_b22:
+       cmp     r1, r0, lsl #21
+       subhs   r1, r1,r0, lsl #21
+       addhs   r3, r3,r2, lsl #21
+.L_divide_b21:
+       cmp     r1, r0, lsl #20
+       subhs   r1, r1,r0, lsl #20
+       addhs   r3, r3,r2, lsl #20
+.L_divide_b20:
+       cmp     r1, r0, lsl #19
+       subhs   r1, r1,r0, lsl #19
+       addhs   r3, r3,r2, lsl #19
+.L_divide_b19:
+       cmp     r1, r0, lsl #18
+       subhs   r1, r1,r0, lsl #18
+       addhs   r3, r3,r2, lsl #18
+.L_divide_b18:
+       cmp     r1, r0, lsl #17
+       subhs   r1, r1,r0, lsl #17
+       addhs   r3, r3,r2, lsl #17
+.L_divide_b17:
+       cmp     r1, r0, lsl #16
+       subhs   r1, r1,r0, lsl #16
+       addhs   r3, r3,r2, lsl #16
+.L_divide_b16:
+       cmp     r1, r0, lsl #15
+       subhs   r1, r1,r0, lsl #15
+       addhs   r3, r3,r2, lsl #15
+.L_divide_b15:
+       cmp     r1, r0, lsl #14
+       subhs   r1, r1,r0, lsl #14
+       addhs   r3, r3,r2, lsl #14
+.L_divide_b14:
+       cmp     r1, r0, lsl #13
+       subhs   r1, r1,r0, lsl #13
+       addhs   r3, r3,r2, lsl #13


