Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64 The s...



details:   https://anonhg.NetBSD.org/src/rev/907b496c6d87
branches:  trunk
changeset: 376645:907b496c6d87
user:      martin <martin%NetBSD.org@localhost>
date:      Tue Jun 27 07:25:55 2023 +0000

description:
The sha512 generator perl script can output a sha256 version too, but
needs a special-cased invocation to do so. Fix the regen script
and update the output.

diffstat:

 crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/Makefile         |     3 +-
 crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/sha256-sparcv9.S |  1948 ++++
 crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/sha512-sparcv9.S |  4156 +++++----
 3 files changed, 4233 insertions(+), 1874 deletions(-)

diffs (truncated from 6193 to 300 lines):

diff -r 99bcefd51726 -r 907b496c6d87 crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/Makefile
--- a/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/Makefile   Tue Jun 27 04:41:23 2023 +0000
+++ b/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/Makefile   Tue Jun 27 07:25:55 2023 +0000
@@ -1,4 +1,4 @@
-#      $NetBSD: Makefile,v 1.6 2018/02/18 23:38:47 christos Exp $
+#      $NetBSD: Makefile,v 1.7 2023/06/27 07:25:55 martin Exp $
 
 .include "bsd.own.mk"
 
@@ -12,6 +12,7 @@ regen:
                j=$$(basename $$i .pl).S; \
                case $$j in \
                sparc*_modes.pl|sha1-*) perl $$i $$j;; \
+               sha512-*) perl $$i $$j; perl $$i $${j:S/512/256/};; \
                *) perl $$i > $$j;; \
                esac; \
        done
diff -r 99bcefd51726 -r 907b496c6d87 crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/sha256-sparcv9.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc64/sha256-sparcv9.S   Tue Jun 27 07:25:55 2023 +0000
@@ -0,0 +1,1948 @@
+#ifndef __ASSEMBLER__
+# define __ASSEMBLER__ 1
+#endif
+#include "crypto/sparc_arch.h"
+
+#ifdef __arch64__
+.register      %g2,#scratch
+.register      %g3,#scratch
+#endif
+
+.section       ".text",#alloc,#execinstr
+
+.align 64
+K256:
+.type  K256,#object
+       .long   0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+       .long   0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+       .long   0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+       .long   0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+       .long   0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+       .long   0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+       .long   0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+       .long   0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+       .long   0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+       .long   0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+       .long   0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+       .long   0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+       .long   0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+       .long   0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+       .long   0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+       .long   0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+.size  K256,.-K256
+
+#ifdef __PIC__
+SPARC_PIC_THUNK(%g1)
+#endif
+
+.globl sha256_block_data_order
+.align 32
+sha256_block_data_order:
+       SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
+       ld      [%g1+4],%g1             ! OPENSSL_sparcv9cap_P[1]
+
+       andcc   %g1, CFR_SHA256, %g0
+       be      .Lsoftware
+       nop
+       ld      [%o0 + 0x00], %f0
+       ld      [%o0 + 0x04], %f1
+       ld      [%o0 + 0x08], %f2
+       ld      [%o0 + 0x0c], %f3
+       ld      [%o0 + 0x10], %f4
+       ld      [%o0 + 0x14], %f5
+       andcc   %o1, 0x7, %g0
+       ld      [%o0 + 0x18], %f6
+       bne,pn  %icc, .Lhwunaligned
+        ld     [%o0 + 0x1c], %f7
+
+.Lhwloop:
+       ldd     [%o1 + 0x00], %f8
+       ldd     [%o1 + 0x08], %f10
+       ldd     [%o1 + 0x10], %f12
+       ldd     [%o1 + 0x18], %f14
+       ldd     [%o1 + 0x20], %f16
+       ldd     [%o1 + 0x28], %f18
+       ldd     [%o1 + 0x30], %f20
+       subcc   %o2, 1, %o2             ! done yet?
+       ldd     [%o1 + 0x38], %f22
+       add     %o1, 0x40, %o1
+       prefetch [%o1 + 63], 20
+
+       .word   0x81b02840              ! SHA256
+
+       bne,pt  SIZE_T_CC, .Lhwloop
+       nop
+
+.Lhwfinish:
+       st      %f0, [%o0 + 0x00]       ! store context
+       st      %f1, [%o0 + 0x04]
+       st      %f2, [%o0 + 0x08]
+       st      %f3, [%o0 + 0x0c]
+       st      %f4, [%o0 + 0x10]
+       st      %f5, [%o0 + 0x14]
+       st      %f6, [%o0 + 0x18]
+       retl
+        st     %f7, [%o0 + 0x1c]
+
+.align 8
+.Lhwunaligned:
+       .word   0x93b24300 !alignaddr   %o1,%g0,%o1
+
+       ldd     [%o1 + 0x00], %f10
+.Lhwunaligned_loop:
+       ldd     [%o1 + 0x08], %f12
+       ldd     [%o1 + 0x10], %f14
+       ldd     [%o1 + 0x18], %f16
+       ldd     [%o1 + 0x20], %f18
+       ldd     [%o1 + 0x28], %f20
+       ldd     [%o1 + 0x30], %f22
+       ldd     [%o1 + 0x38], %f24
+       subcc   %o2, 1, %o2             ! done yet?
+       ldd     [%o1 + 0x40], %f26
+       add     %o1, 0x40, %o1
+       prefetch [%o1 + 63], 20
+
+       .word   0x91b2890c !faligndata  %f10,%f12,%f8
+       .word   0x95b3090e !faligndata  %f12,%f14,%f10
+       .word   0x99b38910 !faligndata  %f14,%f16,%f12
+       .word   0x9db40912 !faligndata  %f16,%f18,%f14
+       .word   0xa1b48914 !faligndata  %f18,%f20,%f16
+       .word   0xa5b50916 !faligndata  %f20,%f22,%f18
+       .word   0xa9b58918 !faligndata  %f22,%f24,%f20
+       .word   0xadb6091a !faligndata  %f24,%f26,%f22
+
+       .word   0x81b02840              ! SHA256
+
+       bne,pt  SIZE_T_CC, .Lhwunaligned_loop
+       .word   0x95b68f9a !for %f26,%f26,%f10  ! %f10=%f26
+
+       ba      .Lhwfinish
+       nop
+.align 16
+.Lsoftware:
+       save    %sp,-STACK_FRAME-0,%sp
+       and     %i1,7,%i4
+       sllx    %i2,6,%i2
+       andn    %i1,7,%i1
+       sll     %i4,3,%i4
+       add     %i1,%i2,%i2
+.Lpic: call    .+8
+       add     %o7,K256-.Lpic,%i3
+
+       ld      [%i0+0],%l0
+       ld      [%i0+4],%l1
+       ld      [%i0+8],%l2
+       ld      [%i0+12],%l3
+       ld      [%i0+16],%l4
+       ld      [%i0+20],%l5
+       ld      [%i0+24],%l6
+       ld      [%i0+28],%l7
+
+.Lloop:
+       ldx     [%i1+0],%o0
+       ldx     [%i1+16],%o2
+       ldx     [%i1+32],%o4
+       ldx     [%i1+48],%g1
+       ldx     [%i1+8],%o1
+       ldx     [%i1+24],%o3
+       subcc   %g0,%i4,%i5 ! should be 64-%i4, but -%i4 works too
+       ldx     [%i1+40],%o5
+       bz,pt   %icc,.Laligned
+       ldx     [%i1+56],%o7
+
+       sllx    %o0,%i4,%o0
+       ldx     [%i1+64],%g2
+       srlx    %o1,%i5,%g4
+       sllx    %o1,%i4,%o1
+       or      %g4,%o0,%o0
+       srlx    %o2,%i5,%g4
+       sllx    %o2,%i4,%o2
+       or      %g4,%o1,%o1
+       srlx    %o3,%i5,%g4
+       sllx    %o3,%i4,%o3
+       or      %g4,%o2,%o2
+       srlx    %o4,%i5,%g4
+       sllx    %o4,%i4,%o4
+       or      %g4,%o3,%o3
+       srlx    %o5,%i5,%g4
+       sllx    %o5,%i4,%o5
+       or      %g4,%o4,%o4
+       srlx    %g1,%i5,%g4
+       sllx    %g1,%i4,%g1
+       or      %g4,%o5,%o5
+       srlx    %o7,%i5,%g4
+       sllx    %o7,%i4,%o7
+       or      %g4,%g1,%g1
+       srlx    %g2,%i5,%g2
+       or      %g2,%o7,%o7
+.Laligned:
+       srlx    %o0,32,%g2
+       add     %l7,%g2,%g2
+       srl     %l4,6,%l7       !! 0
+       xor     %l5,%l6,%g5
+       sll     %l4,7,%g4
+       and     %l4,%g5,%g5
+       srl     %l4,11,%g3
+       xor     %g4,%l7,%l7
+       sll     %l4,21,%g4
+       xor     %g3,%l7,%l7
+       srl     %l4,25,%g3
+       xor     %g4,%l7,%l7
+       sll     %l4,26,%g4
+       xor     %g3,%l7,%l7
+       xor     %l6,%g5,%g5             ! Ch(e,f,g)
+       xor     %g4,%l7,%g3             ! Sigma1(e)
+
+       srl     %l0,2,%l7
+       add     %g5,%g2,%g2
+       ld      [%i3+0],%g5     ! K[0]
+       sll     %l0,10,%g4
+       add     %g3,%g2,%g2
+       srl     %l0,13,%g3
+       xor     %g4,%l7,%l7
+       sll     %l0,19,%g4
+       xor     %g3,%l7,%l7
+       srl     %l0,22,%g3
+       xor     %g4,%l7,%l7
+       sll     %l0,30,%g4
+       xor     %g3,%l7,%l7
+       xor     %g4,%l7,%l7             ! Sigma0(a)
+
+       or      %l0,%l1,%g3
+       and     %l0,%l1,%g4
+       and     %l2,%g3,%g3
+       or      %g3,%g4,%g4     ! Maj(a,b,c)
+       add     %g5,%g2,%g2             ! +=K[0]
+       add     %g4,%l7,%l7
+
+       add     %g2,%l3,%l3
+       add     %g2,%l7,%l7
+       add     %o0,%l6,%g2
+       srl     %l3,6,%l6       !! 1
+       xor     %l4,%l5,%g5
+       sll     %l3,7,%g4
+       and     %l3,%g5,%g5
+       srl     %l3,11,%g3
+       xor     %g4,%l6,%l6
+       sll     %l3,21,%g4
+       xor     %g3,%l6,%l6
+       srl     %l3,25,%g3
+       xor     %g4,%l6,%l6
+       sll     %l3,26,%g4
+       xor     %g3,%l6,%l6
+       xor     %l5,%g5,%g5             ! Ch(e,f,g)
+       xor     %g4,%l6,%g3             ! Sigma1(e)
+
+       srl     %l7,2,%l6
+       add     %g5,%g2,%g2
+       ld      [%i3+4],%g5     ! K[1]
+       sll     %l7,10,%g4
+       add     %g3,%g2,%g2
+       srl     %l7,13,%g3
+       xor     %g4,%l6,%l6
+       sll     %l7,19,%g4
+       xor     %g3,%l6,%l6
+       srl     %l7,22,%g3
+       xor     %g4,%l6,%l6
+       sll     %l7,30,%g4
+       xor     %g3,%l6,%l6
+       xor     %g4,%l6,%l6             ! Sigma0(a)
+
+       or      %l7,%l0,%g3
+       and     %l7,%l0,%g4
+       and     %l1,%g3,%g3
+       or      %g3,%g4,%g4     ! Maj(a,b,c)
+       add     %g5,%g2,%g2             ! +=K[1]
+       add     %g4,%l6,%l6
+
+       add     %g2,%l2,%l2
+       add     %g2,%l6,%l6
+       srlx    %o1,32,%g2
+       add     %l5,%g2,%g2
+       srl     %l2,6,%l5       !! 2
+       xor     %l3,%l4,%g5
+       sll     %l2,7,%g4
+       and     %l2,%g5,%g5
+       srl     %l2,11,%g3
+       xor     %g4,%l5,%l5
+       sll     %l2,21,%g4
+       xor     %g3,%l5,%l5
+       srl     %l2,25,%g3
+       xor     %g4,%l5,%l5
+       sll     %l2,26,%g4
+       xor     %g3,%l5,%l5
+       xor     %l4,%g5,%g5             ! Ch(e,f,g)
+       xor     %g4,%l5,%g3             ! Sigma1(e)
+
+       srl     %l6,2,%l5
+       add     %g5,%g2,%g2
+       ld      [%i3+8],%g5     ! K[2]



Home | Main Index | Thread Index | Old Index