Source-Changes-HG archive
[src/trunk]: src/lib/libc_vfp Add EABI (aeabi) support
details: https://anonhg.NetBSD.org/src/rev/20a4e2559072
branches: trunk
changeset: 787550:20a4e2559072
user: matt <matt%NetBSD.org@localhost>
date: Sun Jun 23 06:19:55 2013 +0000
description:
Add EABI (aeabi) support
diffstat:
lib/libc_vfp/vfpdf.S | 110 ++++++++++++++++++++++++++++++++++++++++++++++++++-
lib/libc_vfp/vfpsf.S | 110 ++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 218 insertions(+), 2 deletions(-)
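The entry points added here come from the ARM EABI's run-time ABI (RTABI), which fixes the names of the floating-point helper functions a compiler may emit, all prefixed __aeabi_. With these names in place, libc_vfp can satisfy those helper calls with short VFP routines. A minimal illustration of the lowering (a sketch of assumed compiler behavior, not code from this change):

    /* On a soft-float EABI target the compiler emits a call to the RTABI
     * helper rather than inline floating-point code; this change lets
     * libc_vfp provide that helper as a three-instruction VFP routine. */
    double add(double a, double b)
    {
        return a + b;   /* lowered to: bl __aeabi_dadd */
    }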
diffs (290 lines):
diff -r f9cbfd921cd7 -r 20a4e2559072 lib/libc_vfp/vfpdf.S
--- a/lib/libc_vfp/vfpdf.S Sun Jun 23 04:14:28 2013 +0000
+++ b/lib/libc_vfp/vfpdf.S Sun Jun 23 06:19:55 2013 +0000
@@ -29,7 +29,7 @@
#include <arm/asm.h>
-RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")
+RCSID("$NetBSD: vfpdf.S,v 1.2 2013/06/23 06:19:55 matt Exp $")
/*
* This file provides softfloat compatible routines which use VFP instructions
@@ -50,6 +50,19 @@
#endif
#define vmov_args vmov_arg0; vmov_arg1
+#ifdef __ARM_EABI__
+#define __adddf3 __aeabi_dadd
+#define __divdf3 __aeabi_ddiv
+#define __muldf3 __aeabi_dmul
+#define __subdf3 __aeabi_dsub
+#define __negdf2 __aeabi_dneg
+#define __extendsfdf2 __aeabi_f2d
+#define __fixdfsi __aeabi_d2iz
+#define __fixunsdfsi __aeabi_d2uiz
+#define __floatsidf __aeabi_i2d
+#define __floatunsidf __aeabi_ui2d
+#endif
+
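Because these #defines are seen before the ENTRY()/END() macros expand, an EABI build assembles each routine below under its RTABI name in place of the libgcc softfloat name; the bodies themselves are unchanged. Note the trailing "z" on the conversion helpers (d2iz, d2uiz): it means round toward zero, the same truncation a C cast performs. A hedged C model of those conversions (illustrative, unprefixed names):

    int      d2iz(double d)   { return (int)d; }       /* __aeabi_d2iz  */
    unsigned d2uiz(double d)  { return (unsigned)d; }  /* __aeabi_d2uiz */
    double   i2d(int i)       { return (double)i; }    /* __aeabi_i2d   */
    double   ui2d(unsigned u) { return (double)u; }    /* __aeabi_ui2d  */
    double   f2d(float f)     { return (double)f; }    /* __aeabi_f2d   */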
ENTRY(__adddf3)
vmov_args
vadd.f64 d0, d0, d1
@@ -64,6 +77,15 @@
RET
END(__subdf3)
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_drsub)
+ vmov_args
+ vsub.f64 d0, d1, d0
+ vmov_ret
+ RET
+END(__aeabi_drsub)
+#endif
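__aeabi_drsub is the one arithmetic entry with no softfloat counterpart: it computes the reversed difference (note the swapped operands in the vsub above), so a compiler can subtract without first exchanging its argument registers. In C terms (a sketch, hypothetical name):

    /* Reversed subtract: the result is b - a, not a - b. */
    double drsub(double a, double b)
    {
        return b - a;
    }

The single-precision file below adds the matching __aeabi_frsub.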
+
ENTRY(__muldf3)
vmov_args
vmul.f64 d0, d0, d1
@@ -120,6 +142,91 @@
RET
END(__floatunsidf)
+/*
+ * Effect of a floating-point comparison on the condition flags.
+ * N Z C V
+ * EQ = 0 1 1 0
+ * LT = 1 0 0 0
+ * GT = 0 0 1 0
+ * UN = 0 0 1 1
+ */
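This table is what `vmrs APSR_nzcv, fpscr` copies into the CPSR: after it, ordinary ARM condition codes test the floating-point outcome, and the unordered case is distinguishable because it alone sets V. A runnable C sketch of the same four-way split (my reading of the table, not code from this change):

    #include <math.h>
    #include <stdio.h>

    /* Classify a comparison into the table's four outcomes. */
    static const char *classify(double a, double b)
    {
        if (isunordered(a, b)) return "UN";  /* N=0 Z=0 C=1 V=1 */
        if (a == b)            return "EQ";  /* N=0 Z=1 C=1 V=0 */
        if (a < b)             return "LT";  /* N=1 Z=0 C=0 V=0 */
        return "GT";                         /* N=0 Z=0 C=1 V=0 */
    }

    int main(void)
    {
        printf("%s %s %s\n",
            classify(1.0, 2.0),    /* LT */
            classify(2.0, 2.0),    /* EQ */
            classify(NAN, 2.0));   /* UN */
        return 0;
    }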
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_cdcmpeq)
+ vmov_args
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ RET
+END(__aeabi_cdcmpeq)
+
+ENTRY(__aeabi_cdcmple)
+ vmov_args
+ vcmpe.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ RET
+END(__aeabi_cdcmple)
+
+ENTRY(__aeabi_cdrcmple)
+ vmov_args
+ vcmpe.f64 d1, d0
+ vmrs APSR_nzcv, fpscr
+ RET
+END(__aeabi_cdrcmple)
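These three cdcmp* entries return their result in the condition flags rather than in r0, so the caller branches immediately after the call. Note the instruction choice: cdcmpeq uses plain vcmp, while the cdcmple pair use vcmpe, which also raises the invalid-operation exception on quiet NaNs. That mirrors C, where == is a quiet comparison but the relational operators signal on unordered operands. A quick check of that distinction (behavior per C99 Annex F; assumes hardware exception flags):

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        volatile double a = NAN, b = 1.0;

        feclearexcept(FE_ALL_EXCEPT);
        (void)(a == b);            /* quiet: must not raise invalid */
        printf("== raised invalid: %d\n", fetestexcept(FE_INVALID) != 0);

        feclearexcept(FE_ALL_EXCEPT);
        (void)(a <= b);            /* signaling: raises invalid */
        printf("<= raised invalid: %d\n", fetestexcept(FE_INVALID) != 0);
        return 0;
    }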
+
+ENTRY(__aeabi_dcmpeq)
+ vmov_args
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ moveq r0, #1 /* (a == b) */
+ movne r0, #0 /* (a != b) or unordered */
+ RET
+END(__aeabi_dcmpeq)
+
+ENTRY(__aeabi_dcmplt)
+ vmov_args
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movlt r0, #1 /* (a < b) */
+ movcs r0, #0 /* (a >= b) or unordered */
+ RET
+END(__aeabi_dcmplt)
+
+ENTRY(__aeabi_dcmple)
+ vmov_args
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movls r0, #1 /* (a <= b) */
+ movhi r0, #0 /* (a > b) or unordered */
+ RET
+END(__aeabi_dcmple)
+
+ENTRY(__aeabi_dcmpge)
+ vmov_args
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movge r0, #1 /* (a >= b) */
+ movlt r0, #0 /* (a < b) or unordered */
+ RET
+END(__aeabi_dcmpge)
+
+ENTRY(__aeabi_dcmpgt)
+ vmov_args
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movgt r0, #1 /* (a > b) */
+ movle r0, #0 /* (a <= b) or unordered */
+ RET
+END(__aeabi_dcmpgt)
+
+ENTRY(__aeabi_dcmpun)
+ vmov_args
+ vcmp.f64 d0, d1
+ vmrs APSR_nzcv, fpscr
+ movvs r0, #1 /* (isnan(a) || isnan(b)) */
+ movvc r0, #0 /* !isnan(a) && !isnan(b) */
+ RET
+END(__aeabi_dcmpun)
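The dcmp* entries above instead return 0 or 1 in r0, and the paired mov comments spell out the unordered behavior: every predicate is false on a NaN operand except dcmpun. Equivalent C, using the quiet C99 comparison macros (illustrative, unprefixed names):

    #include <math.h>

    int dcmpeq(double a, double b) { return a == b; }               /* 0 if unordered */
    int dcmplt(double a, double b) { return isless(a, b); }         /* 0 if unordered */
    int dcmple(double a, double b) { return islessequal(a, b); }    /* 0 if unordered */
    int dcmpge(double a, double b) { return isgreaterequal(a, b); } /* 0 if unordered */
    int dcmpgt(double a, double b) { return isgreater(a, b); }      /* 0 if unordered */
    int dcmpun(double a, double b) { return isunordered(a, b); }    /* 1 if unordered */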
+
+#else
/* N set if compare <= result */
/* Z set if compare = result */
/* C set if compare (=,>=,UNORD) result */
@@ -163,3 +270,4 @@
movvc r0, #0 /* !isnan(a) && !isnan(b) */
RET
END(__unorddf2)
+#endif /* !__ARM_EABI__ */
diff -r f9cbfd921cd7 -r 20a4e2559072 lib/libc_vfp/vfpsf.S
--- a/lib/libc_vfp/vfpsf.S Sun Jun 23 04:14:28 2013 +0000
+++ b/lib/libc_vfp/vfpsf.S Sun Jun 23 06:19:55 2013 +0000
@@ -30,7 +30,7 @@
#include <arm/asm.h>
#include <arm/vfpreg.h>
-RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")
+RCSID("$NetBSD: vfpsf.S,v 1.2 2013/06/23 06:19:55 matt Exp $")
/*
* This file provides softfloat compatible routines which use VFP instructions
@@ -40,6 +40,19 @@
* This file implements the single precision floating point routines.
*/
+#ifdef __ARM_EABI__
+#define __addsf3 __aeabi_fadd
+#define __divsf3 __aeabi_fdiv
+#define __mulsf3 __aeabi_fmul
+#define __subsf3 __aeabi_fsub
+#define __negsf2 __aeabi_fneg
+#define __truncdfsf2 __aeabi_d2f
+#define __fixsfsi __aeabi_f2iz
+#define __fixunssfsi __aeabi_f2uiz
+#define __floatsisf __aeabi_i2f
+#define __floatunsisf __aeabi_ui2f
+#endif
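The single-precision file repeats the renaming trick; the one asymmetry is that the narrowing conversion __aeabi_d2f lives here while the widening __aeabi_f2d lives in vfpdf.S. Conversion semantics, in the same hedged C model as before (illustrative names):

    float    d2f(double d)    { return (float)d; }      /* __aeabi_d2f: narrow        */
    int      f2iz(float f)    { return (int)f; }        /* __aeabi_f2iz: toward zero  */
    unsigned f2uiz(float f)   { return (unsigned)f; }   /* __aeabi_f2uiz: toward zero */
    float    i2f(int i)       { return (float)i; }      /* __aeabi_i2f  */
    float    ui2f(unsigned u) { return (float)u; }      /* __aeabi_ui2f */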
+
ENTRY(__addsf3)
vmov s0, s1, r0, r1
vadd.f32 s0, s0, s1
@@ -54,6 +67,15 @@
RET
END(__subsf3)
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_frsub)
+ vmov s0, s1, r0, r1
+ vsub.f32 s0, s1, s0
+ vmov r0, s0
+ RET
+END(__aeabi_frsub)
+#endif
+
ENTRY(__mulsf3)
vmov s0, s1, r0, r1
vmul.f32 s0, s0, s1
@@ -114,6 +136,91 @@
RET
END(__floatunsisf)
+/*
+ * Effect of a floating-point comparison on the condition flags.
+ * N Z C V
+ * EQ = 0 1 1 0
+ * LT = 1 0 0 0
+ * GT = 0 0 1 0
+ * UN = 0 0 1 1
+ */
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_cfcmpeq)
+ vmov s0, s1, r0, r1
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ RET
+END(__aeabi_cfcmpeq)
+
+ENTRY(__aeabi_cfcmple)
+ vmov s0, s1, r0, r1
+ vcmpe.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ RET
+END(__aeabi_cfcmple)
+
+ENTRY(__aeabi_cfrcmple)
+ vmov s0, s1, r0, r1
+ vcmpe.f32 s1, s0
+ vmrs APSR_nzcv, fpscr
+ RET
+END(__aeabi_cfrcmple)
+
+ENTRY(__aeabi_fcmpeq)
+ vmov s0, s1, r0, r1
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ moveq r0, #1 /* (a == b) */
+ movne r0, #0 /* (a != b) or unordered */
+ RET
+END(__aeabi_fcmpeq)
+
+ENTRY(__aeabi_fcmplt)
+ vmov s0, s1, r0, r1
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movlt r0, #1 /* (a < b) */
+ movcs r0, #0 /* (a >= b) or unordered */
+ RET
+END(__aeabi_fcmplt)
+
+ENTRY(__aeabi_fcmple)
+ vmov s0, s1, r0, r1
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movls r0, #1 /* (a <= b) */
+ movhi r0, #0 /* (a > b) or unordered */
+ RET
+END(__aeabi_fcmple)
+
+ENTRY(__aeabi_fcmpge)
+ vmov s0, s1, r0, r1
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movge r0, #1 /* (a >= b) */
+ movlt r0, #0 /* (a < b) or unordered */
+ RET
+END(__aeabi_fcmpge)
+
+ENTRY(__aeabi_fcmpgt)
+ vmov s0, s1, r0, r1
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movgt r0, #1 /* (a > b) */
+ movle r0, #0 /* (a <= b) or unordered */
+ RET
+END(__aeabi_fcmpgt)
+
+ENTRY(__aeabi_fcmpun)
+ vmov s0, s1, r0, r1
+ vcmp.f32 s0, s1
+ vmrs APSR_nzcv, fpscr
+ movvs r0, #1 /* (isnan(a) || isnan(b)) */
+ movvc r0, #0 /* !isnan(a) && !isnan(b) */
+ RET
+END(__aeabi_fcmpun)
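The single-precision compares mirror the double-precision set one for one. A quick way to watch a compiler target them, assuming an arm-none-eabi GCC (exact flags and output vary by version):

    /* cmp.c -- build for a soft-float EABI target and inspect the assembly:
     *   $ arm-none-eabi-gcc -mfloat-abi=soft -O2 -S cmp.c
     * the generated code should call __aeabi_fcmplt for this test. */
    int lt(float a, float b)
    {
        return a < b;
    }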
+
+#else
/* N set if compare <= result */
/* Z set if compare = result */
/* C set if compare (=,>=,UNORD) result */
@@ -157,3 +264,4 @@
movvc r0, #0 /* !isnan(a) && !isnan(b) */
RET
END(__unordsf2)
+#endif /* !__ARM_EABI__ */