Source-Changes-HG archive


[src/trunk]: src/crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64 regen...



details:   https://anonhg.NetBSD.org/src/rev/253536570c32
branches:  trunk
changeset: 321241:253536570c32
user:      christos <christos%NetBSD.org@localhost>
date:      Wed Mar 07 16:05:44 2018 +0000

description:
regen; some assembly files don't work, so disable them for now.

diffstat:

 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/Makefile             |    14 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aes.inc              |     2 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aesv8-armx.S         |   719 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/arm64cpuid.S         |   101 +
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/armv8-mont.S         |  1404 +++
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/bf.inc               |     2 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/bn.inc               |     2 +
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/chacha-armv8.S       |  1968 +++++
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/chacha.inc           |     5 +
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/crypto.inc           |     6 +
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/des.inc              |     2 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/ec.inc               |     4 +
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/ecp_nistz256-armv8.S |  3824 ++++++++++
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/ghashv8-armx.S       |   276 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/modes.inc            |     2 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/poly1305-armv8.S     |   864 ++
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/poly1305.inc         |     6 +
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/rc4.inc              |     1 -
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/sha.inc              |     7 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/sha1-armv8.S         |   672 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/sha512-armv8.S       |  1151 +++
 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/vpaes-armv8.S        |  1178 +++
 22 files changed, 11367 insertions(+), 843 deletions(-)

diffs (truncated from 13298 to 300 lines):

diff -r e0cfb1844778 -r 253536570c32 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/Makefile
--- a/crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/Makefile   Wed Mar 07 15:56:33 2018 +0000
+++ b/crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/Makefile   Wed Mar 07 16:05:44 2018 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: Makefile,v 1.1 2016/10/17 00:24:13 joerg Exp $
+#      $NetBSD: Makefile,v 1.2 2018/03/07 16:05:44 christos Exp $
 
 .include "bsd.own.mk"
 
@@ -6,9 +6,11 @@
 .include "${NETBSDSRCDIR}/crypto/Makefile.openssl"
 
 regen:
-       for i in ${OPENSSLSRC}/crypto/aes/asm/aesv8-armx.pl \
-                ${OPENSSLSRC}/crypto/modes/asm/ghashv8-armx.pl \
-                ${OPENSSLSRC}/crypto/sha/asm/sha1-armv8.pl; do \
-               j=$$(basename $$i .pl).S; \
-               perl $$i > $$j; \
+       for i in $$(find ${OPENSSLSRC} -name \*arm\*.pl); do \
+               case $$i in \
+               (*/charmap.pl|*/arm-xlate.pl|*/*v4*|*/*v7*);; \
+               (*) perl -I${OPENSSLSRC}/crypto/perlasm \
+               -I${OPENSSLSRC}/crypto/bn/asm $$i linux /dev/stdout \
+               > $$(basename $$i .pl).S;; \
+               esac; \
        done
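
For reference, the rewritten regen target above is equivalent to running the loop below by hand. This is only a sketch: it assumes OPENSSLSRC points at the OpenSSL distribution used by ${NETBSDSRCDIR}/crypto/Makefile.openssl, and it writes the generated .S files into the current directory.

# shell sketch of the new regen loop (the OPENSSLSRC path is an assumption)
OPENSSLSRC=/usr/src/crypto/external/bsd/openssl/dist
for i in $(find "$OPENSSLSRC" -name '*arm*.pl'); do
	case $i in
	# skip the perlasm helpers and the 32-bit ARMv4/ARMv7 variants
	*/charmap.pl|*/arm-xlate.pl|*/*v4*|*/*v7*) ;;
	# translate every other script with the "linux" flavour into a .S file
	*) perl -I"$OPENSSLSRC/crypto/perlasm" \
	        -I"$OPENSSLSRC/crypto/bn/asm" \
	        "$i" linux /dev/stdout > "$(basename "$i" .pl).S" ;;
	esac
done
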
diff -r e0cfb1844778 -r 253536570c32 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aes.inc
--- a/crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aes.inc    Wed Mar 07 15:56:33 2018 +0000
+++ b/crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aes.inc    Wed Mar 07 16:05:44 2018 +0000
@@ -1,4 +1,4 @@
 .PATH.S: ${.PARSEDIR}
-#AES_SRCS = aesv8-armx.S
+#AES_SRCS = aesv8-armx.S aes_cbc.c
 #AESCPPFLAGS = -DAES_ASM
 .include "../../aes.inc"
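
The aes.inc change above keeps the AES assembly commented out, matching the commit message that the assembly is disabled for now. Once the generated aesv8-armx.S assembles and links cleanly, re-enabling it would presumably look like the following sketch, which simply uncomments the lines shown in the hunk:

.PATH.S: ${.PARSEDIR}
AES_SRCS = aesv8-armx.S aes_cbc.c
AESCPPFLAGS = -DAES_ASM
.include "../../aes.inc"
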
diff -r e0cfb1844778 -r 253536570c32 crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aesv8-armx.S
--- a/crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aesv8-armx.S       Wed Mar 07 15:56:33 2018 +0000
+++ b/crypto/external/bsd/openssl/lib/libcrypto/arch/aarch64/aesv8-armx.S       Wed Mar 07 16:05:44 2018 +0000
@@ -2,11 +2,12 @@
 
 #if __ARM_MAX_ARCH__>=7
 .text
-.arch  armv7-a
+.arch  armv7-a @ don't confuse not-so-latest binutils with argv8 :-)
 .fpu   neon
 .code  32
+#undef __thumb2__
 .align 5
-rcon:
+.Lrcon:
 .long  0x01,0x01,0x01,0x01
 .long  0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d     @ rotate-n-splat
 .long  0x1b,0x1b,0x1b,0x1b
@@ -29,7 +30,7 @@
        tst     r1,#0x3f
        bne     .Lenc_key_abort
 
-       adr     r3,rcon
+       adr     r3,.Lrcon
        cmp     r1,#192
 
        veor    q0,q0,q0
@@ -47,14 +48,14 @@
        vtbl.8  d21,{q3},d5
        vext.8  q9,q0,q3,#12
        vst1.32 {q3},[r2]!
-       .byte   0x00,0x43,0xf0,0xf3     @ aese q10,q0
+.byte  0x00,0x43,0xf0,0xf3     @ aese q10,q0
        subs    r1,r1,#1
 
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
-        veor   q10,q10,q1
+       veor    q10,q10,q1
        veor    q3,q3,q9
        vshl.u8 q1,q1,#1
        veor    q3,q3,q10
@@ -66,13 +67,13 @@
        vtbl.8  d21,{q3},d5
        vext.8  q9,q0,q3,#12
        vst1.32 {q3},[r2]!
-       .byte   0x00,0x43,0xf0,0xf3     @ aese q10,q0
+.byte  0x00,0x43,0xf0,0xf3     @ aese q10,q0
 
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
-        veor   q10,q10,q1
+       veor    q10,q10,q1
        veor    q3,q3,q9
        vshl.u8 q1,q1,#1
        veor    q3,q3,q10
@@ -81,13 +82,13 @@
        vtbl.8  d21,{q3},d5
        vext.8  q9,q0,q3,#12
        vst1.32 {q3},[r2]!
-       .byte   0x00,0x43,0xf0,0xf3     @ aese q10,q0
+.byte  0x00,0x43,0xf0,0xf3     @ aese q10,q0
 
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
-        veor   q10,q10,q1
+       veor    q10,q10,q1
        veor    q3,q3,q9
        veor    q3,q3,q10
        vst1.32 {q3},[r2]
@@ -108,7 +109,7 @@
        vtbl.8  d21,{q8},d5
        vext.8  q9,q0,q3,#12
        vst1.32 {d16},[r2]!
-       .byte   0x00,0x43,0xf0,0xf3     @ aese q10,q0
+.byte  0x00,0x43,0xf0,0xf3     @ aese q10,q0
        subs    r1,r1,#1
 
        veor    q3,q3,q9
@@ -119,7 +120,7 @@
 
        vdup.32 q9,d7[1]
        veor    q9,q9,q8
-        veor   q10,q10,q1
+       veor    q10,q10,q1
        vext.8  q8,q0,q8,#12
        vshl.u8 q1,q1,#1
        veor    q8,q8,q9
@@ -144,14 +145,14 @@
        vtbl.8  d21,{q8},d5
        vext.8  q9,q0,q3,#12
        vst1.32 {q8},[r2]!
-       .byte   0x00,0x43,0xf0,0xf3     @ aese q10,q0
+.byte  0x00,0x43,0xf0,0xf3     @ aese q10,q0
        subs    r1,r1,#1
 
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
        veor    q3,q3,q9
        vext.8  q9,q0,q9,#12
-        veor   q10,q10,q1
+       veor    q10,q10,q1
        veor    q3,q3,q9
        vshl.u8 q1,q1,#1
        veor    q3,q3,q10
@@ -160,7 +161,7 @@
 
        vdup.32 q10,d7[1]
        vext.8  q9,q0,q8,#12
-       .byte   0x00,0x43,0xf0,0xf3     @ aese q10,q0
+.byte  0x00,0x43,0xf0,0xf3     @ aese q10,q0
 
        veor    q8,q8,q9
        vext.8  q9,q0,q9,#12
@@ -177,7 +178,7 @@
 
 .Lenc_key_abort:
        mov     r0,r3                   @ return value
-       
+
        bx      lr
 .size  aes_v8_set_encrypt_key,.-aes_v8_set_encrypt_key
 
@@ -203,15 +204,15 @@
 .Loop_imc:
        vld1.32 {q0},[r2]
        vld1.32 {q1},[r0]
-       .byte   0xc0,0x03,0xb0,0xf3     @ aesimc q0,q0
-       .byte   0xc2,0x23,0xb0,0xf3     @ aesimc q1,q1
+.byte  0xc0,0x03,0xb0,0xf3     @ aesimc q0,q0
+.byte  0xc2,0x23,0xb0,0xf3     @ aesimc q1,q1
        vst1.32 {q0},[r0],r4
        vst1.32 {q1},[r2]!
        cmp     r0,r2
        bhi     .Loop_imc
 
        vld1.32 {q0},[r2]
-       .byte   0xc0,0x03,0xb0,0xf3     @ aesimc q0,q0
+.byte  0xc0,0x03,0xb0,0xf3     @ aesimc q0,q0
        vst1.32 {q0},[r0]
 
        eor     r0,r0,r0                @ return value
@@ -229,19 +230,19 @@
        vld1.32 {q1},[r2]!
 
 .Loop_enc:
-       .byte   0x00,0x43,0xb0,0xf3     @ aese q2,q0
-       .byte   0x84,0x43,0xb0,0xf3     @ aesmc q2,q2
+.byte  0x00,0x43,0xb0,0xf3     @ aese q2,q0
+.byte  0x84,0x43,0xb0,0xf3     @ aesmc q2,q2
        vld1.32 {q0},[r2]!
        subs    r3,r3,#2
-       .byte   0x02,0x43,0xb0,0xf3     @ aese q2,q1
-       .byte   0x84,0x43,0xb0,0xf3     @ aesmc q2,q2
+.byte  0x02,0x43,0xb0,0xf3     @ aese q2,q1
+.byte  0x84,0x43,0xb0,0xf3     @ aesmc q2,q2
        vld1.32 {q1},[r2]!
        bgt     .Loop_enc
 
-       .byte   0x00,0x43,0xb0,0xf3     @ aese q2,q0
-       .byte   0x84,0x43,0xb0,0xf3     @ aesmc q2,q2
+.byte  0x00,0x43,0xb0,0xf3     @ aese q2,q0
+.byte  0x84,0x43,0xb0,0xf3     @ aesmc q2,q2
        vld1.32 {q0},[r2]
-       .byte   0x02,0x43,0xb0,0xf3     @ aese q2,q1
+.byte  0x02,0x43,0xb0,0xf3     @ aese q2,q1
        veor    q2,q2,q0
 
        vst1.8  {q2},[r1]
@@ -258,19 +259,19 @@
        vld1.32 {q1},[r2]!
 
 .Loop_dec:
-       .byte   0x40,0x43,0xb0,0xf3     @ aesd q2,q0
-       .byte   0xc4,0x43,0xb0,0xf3     @ aesimc q2,q2
+.byte  0x40,0x43,0xb0,0xf3     @ aesd q2,q0
+.byte  0xc4,0x43,0xb0,0xf3     @ aesimc q2,q2
        vld1.32 {q0},[r2]!
        subs    r3,r3,#2
-       .byte   0x42,0x43,0xb0,0xf3     @ aesd q2,q1
-       .byte   0xc4,0x43,0xb0,0xf3     @ aesimc q2,q2
+.byte  0x42,0x43,0xb0,0xf3     @ aesd q2,q1
+.byte  0xc4,0x43,0xb0,0xf3     @ aesimc q2,q2
        vld1.32 {q1},[r2]!
        bgt     .Loop_dec
 
-       .byte   0x40,0x43,0xb0,0xf3     @ aesd q2,q0
-       .byte   0xc4,0x43,0xb0,0xf3     @ aesimc q2,q2
+.byte  0x40,0x43,0xb0,0xf3     @ aesd q2,q0
+.byte  0xc4,0x43,0xb0,0xf3     @ aesimc q2,q2
        vld1.32 {q0},[r2]
-       .byte   0x42,0x43,0xb0,0xf3     @ aesd q2,q1
+.byte  0x42,0x43,0xb0,0xf3     @ aesd q2,q1
        veor    q2,q2,q0
 
        vst1.8  {q2},[r1]
@@ -281,9 +282,9 @@
 .align 5
 aes_v8_cbc_encrypt:
        mov     ip,sp
-       stmdb   sp!,{r4-r8,lr}
-       vstmdb  sp!,{d8-d15}            @ ABI specification says so
-       ldmia   ip,{r4-r5}              @ load remaining args
+       stmdb   sp!,{r4,r5,r6,r7,r8,lr}
+       vstmdb  sp!,{d8,d9,d10,d11,d12,d13,d14,d15}            @ ABI specification says so
+       ldmia   ip,{r4,r5}              @ load remaining args
        subs    r2,r2,#16
        mov     r8,#16
        blo     .Lcbc_abort
@@ -295,13 +296,13 @@
        vld1.8  {q6},[r4]
        vld1.8  {q0},[r0],r8
 
-       vld1.32 {q8-q9},[r3]            @ load key schedule...
+       vld1.32 {q8,q9},[r3]            @ load key schedule...
        sub     r5,r5,#6
        add     r7,r3,r5,lsl#4  @ pointer to last 7 round keys
        sub     r5,r5,#2
-       vld1.32 {q10-q11},[r7]!
-       vld1.32 {q12-q13},[r7]!
-       vld1.32 {q14-q15},[r7]!
+       vld1.32 {q10,q11},[r7]!
+       vld1.32 {q12,q13},[r7]!
+       vld1.32 {q14,q15},[r7]!
        vld1.32 {q7},[r7]
 
        add     r7,r3,#32
@@ -313,62 +314,62 @@
        veor    q5,q8,q7
        beq     .Lcbc_enc128
 
-       vld1.32 {q2-q3},[r7]
+       vld1.32 {q2,q3},[r7]
        add     r7,r3,#16
        add     r6,r3,#16*4
        add     r12,r3,#16*5
-       .byte   0x20,0x03,0xb0,0xf3     @ aese q0,q8
-       .byte   0x80,0x03,0xb0,0xf3     @ aesmc q0,q0
+.byte  0x20,0x03,0xb0,0xf3     @ aese q0,q8
+.byte  0x80,0x03,0xb0,0xf3     @ aesmc q0,q0
        add     r14,r3,#16*6
        add     r3,r3,#16*7
        b       .Lenter_cbc_enc
 
 .align 4
 .Loop_cbc_enc:
-       .byte   0x20,0x03,0xb0,0xf3     @ aese q0,q8
-       .byte   0x80,0x03,0xb0,0xf3     @ aesmc q0,q0
-        vst1.8 {q6},[r1]!
+.byte  0x20,0x03,0xb0,0xf3     @ aese q0,q8
+.byte  0x80,0x03,0xb0,0xf3     @ aesmc q0,q0
+       vst1.8  {q6},[r1]!
 .Lenter_cbc_enc:
-       .byte   0x22,0x03,0xb0,0xf3     @ aese q0,q9
-       .byte   0x80,0x03,0xb0,0xf3     @ aesmc q0,q0
-       .byte   0x04,0x03,0xb0,0xf3     @ aese q0,q2
-       .byte   0x80,0x03,0xb0,0xf3     @ aesmc q0,q0
+.byte  0x22,0x03,0xb0,0xf3     @ aese q0,q9
+.byte  0x80,0x03,0xb0,0xf3     @ aesmc q0,q0


