Source-Changes-HG archive


[src/trunk]: src/common/lib/libc/arch/arm/quad Add cfi ops.



details:   https://anonhg.NetBSD.org/src/rev/fe2f2f31ef81
branches:  trunk
changeset: 789442:fe2f2f31ef81
user:      matt <matt%NetBSD.org@localhost>
date:      Mon Aug 19 03:27:34 2013 +0000

description:
Add cfi ops.
Thumbify
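
For readers skimming the diff below: __aeabi_ldivmod is the AEABI run-time
helper for 64-bit signed division with remainder; the run-time ABI has it
return the quotient in the r0/r1 pair and the remainder in the r2/r3 pair
(the ALO/AHI and BLO/BHI macros in the source).  A minimal C sketch of that
contract, for orientation only -- the struct and function names here are
invented and are not part of this change:

#include <stdint.h>

/*
 * Illustrative reference model only.  The real routine is hand-written
 * assembly that calls __qdivrem() on the magnitudes and then fixes up
 * the signs; C's truncating division produces the same results.
 */
typedef struct {
        int64_t quot;   /* returned in the r0/r1 register pair */
        int64_t rem;    /* returned in the r2/r3 register pair */
} ldivmod_ret;

static ldivmod_ret
ldivmod_ref(int64_t n, int64_t d)
{
        ldivmod_ret r;

        r.quot = n / d;
        r.rem  = n % d;  /* remainder takes the sign of the numerator */
        return r;
}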

diffstat:

 common/lib/libc/arch/arm/quad/__aeabi_ldivmod.S |  204 ++++++++++++++++++------
 1 files changed, 153 insertions(+), 51 deletions(-)

diffs (270 lines):

diff -r 58cada9a693e -r fe2f2f31ef81 common/lib/libc/arch/arm/quad/__aeabi_ldivmod.S
--- a/common/lib/libc/arch/arm/quad/__aeabi_ldivmod.S   Mon Aug 19 03:05:17 2013 +0000
+++ b/common/lib/libc/arch/arm/quad/__aeabi_ldivmod.S   Mon Aug 19 03:27:34 2013 +0000
@@ -29,17 +29,7 @@
 
 #include <machine/asm.h>
 
-RCSID("$NetBSD: __aeabi_ldivmod.S,v 1.11 2013/08/13 15:52:00 matt Exp $")
-
-ENTRY(__aeabi_ldivmod)
-#if !defined(_KERNEL) && !defined(_STANDALONE)
-       orrs    ip, r2, r3
-       beq     .Ldivbyzero
-#endif
-
-       push    {r4-r5, sl, lr}
-#define        NEG     r5
-       mov     NEG, #0
+RCSID("$NetBSD: __aeabi_ldivmod.S,v 1.12 2013/08/19 03:27:34 matt Exp $")
 
 #ifdef __ARMEB__
 #define        ALO     r1      /* incoming numerator, outgoing quotient */
@@ -53,54 +43,135 @@
 #define        BHI     r3      /* incoming denominator, outgoing remainder */
 #endif
 
+ENTRY(__aeabi_ldivmod)
+#ifdef __ARM_EABI__
+       .fnstart
+       .cfi_startproc
+#endif
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+#if !defined(__thumb__)
+       orrs    ip, BLO, BHI
+       beq     .Ldivbyzero
+#elif defined(_ARM_ARCH_T2)
+       cbnz    BLO, 1f
+       cbz     BHI, .Ldivbyzero
+#else
+       cmp     BLO, #0
+       bne     1f
+       cmp     BHI, #0
+       beq     .Ldivbyzero
+#endif
+1:
+#endif
+
+       push    {r4-r6, lr}
+#ifdef __ARM_EABI__
+       .cfi_def_cfa_offset 16
+       .cfi_offset 14, -4
+       .cfi_offset 6, -8
+       .cfi_offset 5, -12
+       .cfi_offset 4, -16
+#endif
+#define        NEG     r5
+       movs    NEG, #0
+
        cmp     BHI, #0
        bge     2f
-       eor     NEG, NEG, #1    /* flip quotient sign */
+       movs    NEG, #1         /* flip quotient sign */
        bl      .Lnegate_b
        bcs     .Lmaxdenom
 
 2:
        cmp     AHI, #0
-       /* bge  3f */
+#ifdef __thumb__
+       bge     3f
+       movs    r4, #3
+       eors    NEG, NEG, r4    /* flip quotient sign, flip remainder sign */
+       bl      .Lnegate_a
+3:
+#else
        eorlt   NEG, NEG, #3    /* flip quotient sign, flip remainder sign */
        bllt    .Lnegate_a
-3:
+#endif
+
        /*
         * Arguments are setup, allocate some stack for the remainder
         * and call __qdivrem for the heavy lifting.
         */
+#ifdef __ARM_EABI__
+       .cfi_def_cfa_offset 32
+#endif
        sub     sp, sp, #16
-       add     ip, sp, #8
-       str     ip, [sp]
+#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
+       adds    r4, sp, #8
+#else
+       mov     r4, sp
+       adds    r4, r4, #8
+#endif
+       str     r4, [sp]
        bl      PLT_SYM(__qdivrem)
        add     sp, sp, #8
+#ifdef __ARM_EABI__
+       .cfi_def_cfa_offset 24
+       .cfi_offset 3, -20
+       .cfi_offset 2, -24
+#endif
 
-       teq     NEG, #0         /* any signs to flip? */
        /*
         * The quotient is already in the right place and neither value
         * needs its sign flipped.
         */
-       popeq   {r2-r5, sl, lr}
-       RETc(eq)
+#if defined(__thumb__) && defined(_ARM_ARCH_T2)
+       cbz     NEG, .Lnegate_neither
+#else
+       cmp     NEG, #0         /* any signs to flip? */
+       beq     .Lnegate_neither
+#endif
 
-       pop     {r2, r3}
-       tst     NEG, #2         /* does remainder need to be negative? */
-       blne    .Lnegate_b
-       tst     NEG, #1         /* does quotient need to be negative? */
-       blne    .Lnegate_a
-       pop     {r4-r5, sl, lr}
+       cmp     NEG, #2         /* does remainder need to be negative? */
+       beq     .Lnegate_b_only /* 2 means b only */
+       bgt     .Lnegate_both   /* 3 means both */
+.Lnegate_a_only:
+       bl      .Lnegate_a      /* 1 means a only */
+.Lnegate_neither:
+       pop     {r2-r6, pc}     /* grab b from stack */
+.Lnegate_both:
+       bl      .Lnegate_a
+.Lnegate_b_only:
+       pop     {r2-r3}         /* get remainder */
+#ifdef __ARM_EABI__
+       .cfi_def_cfa_offset 16
+#endif
+       bl      .Lnegate_b      /* negate it */
+       pop     {r4-r6, pc}
+
+       .align  0
+.Lnegate_a:
+#ifdef __thumb__
+       movs    r4, AHI
+       movs    AHI, #0
+       negs    ALO, ALO
+       sbcs    AHI, AHI, r4
+#else
+       negs    ALO, ALO
+       rsc     AHI, AHI, #0
+#endif
        RET
 
-.Lnegate_a:
-        rsbs   ALO, ALO, #0
-        rsc    AHI, AHI, #0
+       .align  0
+.Lnegate_b:
+#ifdef __thumb__
+       movs    r4, BHI
+       movs    BHI, #0
+       negs    BLO, BLO
+       sbcs    BHI, BHI, r4
+#else
+       negs    BLO, BLO
+       rsc     BHI, BHI, #0
+#endif
        RET
 
-.Lnegate_b:
-        rsbs   BLO, BLO, #0
-        rsc    BHI, BHI, #0
-       RET
-
+       .align  0
 .Lmaxdenom:
        /*
         * We had a carry so the denominator must have INT64_MIN
@@ -108,40 +179,71 @@
         * them to see if the numerator has the same value.  We
         * don't have to worry about sign.
         */
-       teq     BHI, AHI
-       teqeq   BLO, ALO
+       cmp     BHI, AHI
+#ifdef __thumb__
+       bne     1f
+       cmp     BLO, ALO
+#else
+       cmpeq   BLO, ALO
+#endif
        bne     1f
 
        /*
         * They were equal, so we return a quotient of 1 and remainder of 0.
         */
-       mov     ALO, #1
-       mov     AHI, #0
-       mov     BLO, #0
-       mov     BHI, #0
-       pop     {r4-r5, sl, lr}
-       RET
+       movs    ALO, #1
+       movs    AHI, #0
+       movs    BLO, #0
+       movs    BHI, #0
+       pop     {r4-r6, pc}
 
        /*
         * Our remainder must be the numerator and our quotient is 0.
         */
-1:     mov     BLO, ALO
-       mov     BHI, AHI
-       mov     ALO, #0
-       mov     AHI, #0
-       pop     {r4-r5, sl, lr}
-       RET
+       .align  0
+1:     movs    BLO, ALO
+       movs    BHI, AHI
+       movs    ALO, #0
+       movs    AHI, #0
+       pop     {r4-r6, pc}
 
 #if !defined(_KERNEL) && !defined(_STANDALONE)
+       .align  0
 .Ldivbyzero:
-       push    {r0-r1, ip, lr}
+       push    {r0-r1,r4,lr}   
+#ifdef __ARM_EABI__
+       .save   {r0-r1,r4,lr}   
+       .cfi_def_cfa_offset 16
+       .cfi_offset 14, -4
+       .cfi_offset  4, -8
+#endif
        cmp     AHI, #0
+#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
+#ifdef __thumb__
+       ittee   ge
+#endif
        mvnge   ALO, #0
-       mvnlt   AHI, #0x80000000
+       mvnge   AHI, #0x80000000
        movlt   ALO, #0
        movlt   AHI, #0x80000000
+#else
+       blt     1f
+       movs    ALO, #0
+       mvns    ALO, ALO
+       mov     AHI, ALO
+       lsrs    AHI, AHI, #1
+       b       2f
+1:
+       movs    ALO, #0
+       movs    AHI, #1
+       lsls    AHI, AHI, #31
+2:
+#endif /* __thumb__ && !_ARM_ARCH_T2 */
        bl      PLT_SYM(__aeabi_ldiv0)
-       pop     {r2-r3, ip, lr}
-       RET
+       pop     {r2-r4, pc}
+#endif /* !_KERNEL && !_STANDALONE */
+#ifdef __ARM_EABI__
+       .cfi_endproc
+       .fnend
 #endif
 END(__aeabi_ldivmod)
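
A note on the sign handling above: the routine packs two flags into NEG (r5);
bit 0 means the quotient must be negated after __qdivrem() returns, bit 1
means the remainder must be.  A hedged C sketch of that bookkeeping follows;
the names are invented for illustration, plain C division stands in for
__qdivrem(), and the divide-by-zero and INT64_MIN-denominator special cases
handled in the assembly are omitted:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the NEG bitmask bookkeeping; overflow edge cases are ignored. */
static void
ldivmod_signs(int64_t n, int64_t d, int64_t *q, int64_t *r)
{
        unsigned neg = 0;               /* bit 0: negate quotient, bit 1: negate remainder */
        uint64_t un = (uint64_t)n, ud = (uint64_t)d;
        uint64_t uq, ur;

        if (d < 0) {
                neg ^= 1;               /* quotient changes sign */
                ud = -(uint64_t)d;      /* magnitude, safe even for INT64_MIN */
        }
        if (n < 0) {
                neg ^= 3;               /* quotient flips (again), remainder follows numerator */
                un = -(uint64_t)n;
        }
        uq = un / ud;                   /* the assembly calls __qdivrem() here */
        ur = un % ud;
        *q = (neg & 1) ? -(int64_t)uq : (int64_t)uq;
        *r = (neg & 2) ? -(int64_t)ur : (int64_t)ur;
}

int
main(void)
{
        int64_t q, r;

        ldivmod_signs(-7, 2, &q, &r);
        printf("%lld %lld\n", (long long)q, (long long)r);  /* prints "-3 -1" */
        return 0;
}

The -7 / 2 example yields a quotient of -3 and a remainder of -1, matching
the truncating-division semantics the helper is required to provide.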


