Source-Changes-HG archive
[src/trunk]: src/tests/sys/crypto/chacha Implement 4-way vectorization of Cha...
details: https://anonhg.NetBSD.org/src/rev/f1b25b09d6b6
branches: trunk
changeset: 936531:f1b25b09d6b6
user: riastradh <riastradh@NetBSD.org>
date: Tue Jul 28 20:08:48 2020 +0000
description:
Implement 4-way vectorization of ChaCha for armv7 NEON.
cgd performance is not as good as I was hoping (~4% improvement over
chacha_ref.c), but it should improve substantially more once we let
the cgd worker thread keep its FPU state, so that we don't have to pay
the cost of an isb and zeroing the FPU on every 512-byte cgd block.
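
As a rough sketch (not from the commit itself) of what the 4-way
dispatch looks like in C now that the aarch64-only guards are gone:
the chacha_stream256_neon prototype below matches chacha_neon.h in
this change, while chacha_block1 is a hypothetical stand-in for the
existing one-block NEON path, and the real code also handles partial
trailing blocks.

    #include <stddef.h>
    #include <stdint.h>

    /* From chacha_neon.h in this change: generate four consecutive
     * 64-byte ChaCha blocks (256 bytes of keystream) in one call. */
    void chacha_stream256_neon(uint8_t s[256], uint32_t blkno,
        const uint8_t nonce[12], const uint8_t key[32],
        const uint8_t c[16], unsigned nr);

    /* Hypothetical stand-in for the existing one-block NEON path. */
    void chacha_block1(uint8_t s[64], uint32_t blkno,
        const uint8_t nonce[12], const uint8_t key[32],
        const uint8_t c[16], unsigned nr);

    static void
    stream_sketch(uint8_t *s, size_t n, uint32_t blkno,
        const uint8_t nonce[12], const uint8_t key[32],
        const uint8_t c[16], unsigned nr)
    {
            /* Four independent blocks per call: each ChaCha block is
             * a function of (key, nonce, blkno) only, so blocks
             * blkno..blkno+3 can be computed in parallel lanes. */
            for (; n >= 256; s += 256, n -= 256, blkno += 4)
                    chacha_stream256_neon(s, blkno, nonce, key, c, nr);

            /* Tail: one 64-byte block at a time. */
            for (; n >= 64; s += 64, n -= 64, blkno++)
                    chacha_block1(s, blkno, nonce, key, c, nr);
    }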
diffstat:
sys/crypto/chacha/arch/arm/chacha_neon.c | 6 +-
sys/crypto/chacha/arch/arm/chacha_neon.h | 6 +-
sys/crypto/chacha/arch/arm/chacha_neon_32.S | 692 ++++++++++++++++++++++++++++
sys/crypto/chacha/arch/arm/files.chacha_arm | 3 +-
tests/sys/crypto/chacha/Makefile | 6 +-
5 files changed, 701 insertions(+), 12 deletions(-)
diffs (truncated from 791 to 300 lines):
diff -r c8eaf9dad71e -r f1b25b09d6b6 sys/crypto/chacha/arch/arm/chacha_neon.c
--- a/sys/crypto/chacha/arch/arm/chacha_neon.c Tue Jul 28 20:05:33 2020 +0000
+++ b/sys/crypto/chacha/arch/arm/chacha_neon.c Tue Jul 28 20:08:48 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: chacha_neon.c,v 1.6 2020/07/28 20:05:33 riastradh Exp $ */
+/* $NetBSD: chacha_neon.c,v 1.7 2020/07/28 20:08:48 riastradh Exp $ */
/*-
* Copyright (c) 2020 The NetBSD Foundation, Inc.
@@ -221,10 +221,8 @@
unsigned nr)
{
-#ifdef __aarch64__
for (; n >= 256; s += 256, n -= 256, blkno += 4)
chacha_stream256_neon(s, blkno, nonce, k, chacha_const32, nr);
-#endif
if (n) {
const uint32x4_t blkno_inc = {1,0,0,0};
@@ -281,11 +279,9 @@
unsigned nr)
{
-#ifdef __aarch64__
for (; n >= 256; s += 256, p += 256, n -= 256, blkno += 4)
chacha_stream_xor256_neon(s, p, blkno, nonce, k,
chacha_const32, nr);
-#endif
if (n) {
const uint32x4_t blkno_inc = {1,0,0,0};
diff -r c8eaf9dad71e -r f1b25b09d6b6 sys/crypto/chacha/arch/arm/chacha_neon.h
--- a/sys/crypto/chacha/arch/arm/chacha_neon.h Tue Jul 28 20:05:33 2020 +0000
+++ b/sys/crypto/chacha/arch/arm/chacha_neon.h Tue Jul 28 20:08:48 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: chacha_neon.h,v 1.2 2020/07/27 20:51:29 riastradh Exp $ */
+/* $NetBSD: chacha_neon.h,v 1.3 2020/07/28 20:08:48 riastradh Exp $ */
/*-
* Copyright (c) 2020 The NetBSD Foundation, Inc.
@@ -64,8 +64,7 @@
const uint8_t[static 32],
unsigned);
-#ifdef __aarch64__
-/* Assembly helpers -- aarch64 only for now */
+/* Assembly helpers */
void chacha_stream256_neon(uint8_t[restrict static 256], uint32_t,
const uint8_t[static 12],
const uint8_t[static 32],
@@ -78,7 +77,6 @@
const uint8_t[static 32],
const uint8_t[static 16],
unsigned);
-#endif /* __aarch64__ */
extern const struct chacha_impl chacha_neon_impl;
diff -r c8eaf9dad71e -r f1b25b09d6b6 sys/crypto/chacha/arch/arm/chacha_neon_32.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/crypto/chacha/arch/arm/chacha_neon_32.S Tue Jul 28 20:08:48 2020 +0000
@@ -0,0 +1,692 @@
+/* $NetBSD: chacha_neon_32.S,v 1.1 2020/07/28 20:08:48 riastradh Exp $ */
+
+/*-
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+RCSID("$NetBSD: chacha_neon_32.S,v 1.1 2020/07/28 20:08:48 riastradh Exp $")
+
+ .fpu neon
+
+/*
+ * ChaCha round, split up so we can interleave the quarterrounds on
+ * independent rows/diagonals to maximize pipeline efficiency, with
+ * spills to deal with the scarcity of registers. Reference:
+ *
+ * Daniel J. Bernstein, `ChaCha, a variant of Salsa20', Workshop
+ * Record of the State of the Art in Stream Ciphers -- SASC 2008.
+ * https://cr.yp.to/papers.html#chacha
+ *
+ * a += b; d ^= a; d <<<= 16;
+ * c += d; b ^= c; b <<<= 12;
+ * a += b; d ^= a; d <<<= 8;
+ * c += d; b ^= c; b <<<= 7;
+ *
+ * The rotations are implemented with:
+ * <<< 16 VREV32.16 (swap the 16-bit halves of each 32-bit lane)
+ * <<< 12 VSHL/VSRI (shift left; shift right and insert)
+ * <<< 8 VTBL (general byte permutation; rot8 table below,
+ * addressed via r7)
+ * <<< 7 VSHL/VSRI
+ */
+
+.macro ROUNDLD a0,a1,a2,a3, b0,b1,b2,b3, c0,c1,c2,c3, d0,d1,d2,d3
+ vld1.32 {\c2-\c3}, [fp, :256]
+.endm
+
+.macro ROUND a0,a1,a2,a3, b0,b1,b2,b3, c0,c1,c2,c3, d0,d1,d2,d3, c0l, d0l,d0h,d1l,d1h,d2l,d2h,d3l,d3h
+ /* a += b; d ^= a; d <<<= 16 */
+ vadd.u32 \a0, \a0, \b0
+ vadd.u32 \a1, \a1, \b1
+ vadd.u32 \a2, \a2, \b2
+ vadd.u32 \a3, \a3, \b3
+
+ veor \d0, \d0, \a0
+ veor \d1, \d1, \a1
+ veor \d2, \d2, \a2
+ veor \d3, \d3, \a3
+
+ vrev32.16 \d0, \d0
+ vrev32.16 \d1, \d1
+ vrev32.16 \d2, \d2
+ vrev32.16 \d3, \d3
+
+ /* c += d; b ^= c; b <<<= 12 */
+ vadd.u32 \c0, \c0, \d0
+ vadd.u32 \c1, \c1, \d1
+ vadd.u32 \c2, \c2, \d2
+ vadd.u32 \c3, \c3, \d3
+
+ vst1.32 {\c0-\c1}, [fp, :256] /* free c0 and c1 as temps */
+
+ veor \c0, \b0, \c0
+ veor \c1, \b1, \c1
+ vshl.u32 \b0, \c0, #12
+ vshl.u32 \b1, \c1, #12
+ vsri.u32 \b0, \c0, #(32 - 12)
+ vsri.u32 \b1, \c1, #(32 - 12)
+
+ veor \c0, \b2, \c2
+ veor \c1, \b3, \c3
+ vshl.u32 \b2, \c0, #12
+ vshl.u32 \b3, \c1, #12
+ vsri.u32 \b2, \c0, #(32 - 12)
+ vsri.u32 \b3, \c1, #(32 - 12)
+
+ vld1.8 {\c0l}, [r7, :64] /* load rot8 table */
+
+ /* a += b; d ^= a; d <<<= 8 */
+ vadd.u32 \a0, \a0, \b0
+ vadd.u32 \a1, \a1, \b1
+ vadd.u32 \a2, \a2, \b2
+ vadd.u32 \a3, \a3, \b3
+
+ veor \d0, \d0, \a0
+ veor \d1, \d1, \a1
+ veor \d2, \d2, \a2
+ veor \d3, \d3, \a3
+
+ vtbl.8 \d0l, {\d0l}, \c0l /* <<< 8 */
+ vtbl.8 \d0h, {\d0h}, \c0l
+ vtbl.8 \d1l, {\d1l}, \c0l
+ vtbl.8 \d1h, {\d1h}, \c0l
+ vtbl.8 \d2l, {\d2l}, \c0l
+ vtbl.8 \d2h, {\d2h}, \c0l
+ vtbl.8 \d3l, {\d3l}, \c0l
+ vtbl.8 \d3h, {\d3h}, \c0l
+
+ vld1.32 {\c0-\c1}, [fp, :256] /* restore c0 and c1 */
+
+ /* c += d; b ^= c; b <<<= 7 */
+ vadd.u32 \c2, \c2, \d2
+ vadd.u32 \c3, \c3, \d3
+ vadd.u32 \c0, \c0, \d0
+ vadd.u32 \c1, \c1, \d1
+
+ vst1.32 {\c2-\c3}, [fp, :256] /* free c2 and c3 as temps */
+
+ veor \c2, \b2, \c2
+ veor \c3, \b3, \c3
+ vshl.u32 \b2, \c2, #7
+ vshl.u32 \b3, \c3, #7
+ vsri.u32 \b2, \c2, #(32 - 7)
+ vsri.u32 \b3, \c3, #(32 - 7)
+
+ veor \c2, \b0, \c0
+ veor \c3, \b1, \c1
+ vshl.u32 \b0, \c2, #7
+ vshl.u32 \b1, \c3, #7
+ vsri.u32 \b0, \c2, #(32 - 7)
+ vsri.u32 \b1, \c3, #(32 - 7)
+.endm
+
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#define HTOLE32(x)
+#define LE32TOH(x)
+#elif _BYTE_ORDER == _BIG_ENDIAN
+#define HTOLE32(x) vrev32.8 x, x
+#define LE32TOH(x) vrev32.8 x, x
+#endif
+
+ .text
+ .p2align 2
+.Lconstants_addr:
+ .long .Lconstants - .
+
+/*
+ * chacha_stream256_neon(uint8_t s[256]@r0,
+ * uint32_t blkno@r1,
+ * const uint8_t nonce[12]@r2,
+ * const uint8_t key[32]@r3,
+ * const uint8_t const[16]@sp[0],
+ * unsigned nr@sp[4])
+ */
+ENTRY(chacha_stream256_neon)
+ /* save callee-saves registers */
+ push {r4, r5, r6, r7, r8, r10, fp, lr}
+ vpush {d8-d15}
+
+ /* r7 := .Lconstants - .Lconstants_addr, r6 := .Lconstants_addr */
+ ldr r7, .Lconstants_addr
+ adr r6, .Lconstants_addr
+
+ /* reserve space for two 128-bit/16-byte q registers */
+ sub fp, sp, #0x20
+ bic fp, fp, #0x1f /* align */
+
+ /* get parameters */
+ add ip, sp, #96
+ add r7, r7, r6 /* r7 := .Lconstants (= v0123) */
+ ldm ip, {r4, r5} /* r4 := const, r5 := nr */
+ ldm r2, {r6, r8, r10} /* (r6, r8, r10) := nonce[0:12) */
+
+ vld1.32 {q12}, [r4] /* q12 := constant */
+ vld1.32 {q13-q14}, [r3] /* q13-q14 := key */
+ vld1.32 {q15}, [r7, :128]! /* q15 := (0, 1, 2, 3) (128-bit aligned) */
+
+ vdup.32 q0, d24[0] /* q0-q3 := constant */
+ vdup.32 q1, d24[1]
+ vdup.32 q2, d25[0]
+ vdup.32 q3, d25[1]
+ vdup.32 q12, r1 /* q12 := (blkno, blkno, blkno, blkno) */
+ vdup.32 q4, d26[0] /* q4-q11 := (key, key, key, key) */
+ vdup.32 q5, d26[1]
+ vdup.32 q6, d27[0]
+ vdup.32 q7, d27[1]
+ vdup.32 q8, d28[0]
+ vdup.32 q9, d28[1]
+ vdup.32 q10, d29[0]
+ vdup.32 q11, d29[1]
+ vadd.u32 q12, q12, q15 /* q12 := (blkno,blkno+1,blkno+2,blkno+3) */
+ vdup.32 q13, r6 /* q13-q15 := nonce */
+ vdup.32 q14, r8
+ vdup.32 q15, r10
+
+ HTOLE32(q0)
+ HTOLE32(q1)
+ HTOLE32(q2)
+ HTOLE32(q3)
+ HTOLE32(q4)
+ HTOLE32(q5)
+ HTOLE32(q6)
+ HTOLE32(q7)
+ HTOLE32(q8)
+ HTOLE32(q9)
+ HTOLE32(q10)
+ HTOLE32(q11)
+ HTOLE32(q12)
+ HTOLE32(q13)
+ HTOLE32(q14)
+ HTOLE32(q15)
+
+ b 2f
+
+ _ALIGN_TEXT
+1: ROUNDLD q0,q1,q2,q3, q5,q6,q7,q4, q10,q11,q8,q9, q15,q12,q13,q14
+2: subs r5, r5, #2
+ ROUND q0,q1,q2,q3, q4,q5,q6,q7, q8,q9,q10,q11, q12,q13,q14,q15, \
+ d16, d24,d25, d26,d27, d28,d29, d30,d31
+ ROUNDLD q0,q1,q2,q3, q4,q5,q6,q7, q8,q9,q10,q11, q12,q13,q14,q15
+ ROUND q0,q1,q2,q3, q5,q6,q7,q4, q10,q11,q8,q9, q15,q12,q13,q14, \
+ d20, d30,d31, d24,d25, d26,d27, d28,d29
+ bne 1b
+
+ /*
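
The listing is cut off above at the 300-line mark. For readers
following the rotation strategy documented at the top of
chacha_neon_32.S, here is a rough NEON-intrinsics rendering of one
quarterround, illustrative only and not from the commit: it uses the
shift/insert pattern for <<< 8 as well, where the assembly instead
uses a VTBL byte permutation. It should compile for armv7 with
-mfpu=neon.

    #include <arm_neon.h>

    /* Rotate each 32-bit lane left by the constant n: VSHL, then
     * VSRI to fill in the low bits (the pattern the assembly uses
     * for <<< 12 and <<< 7). */
    #define ROL32(x, n) \
            vsriq_n_u32(vshlq_n_u32((x), (n)), (x), 32 - (n))

    /* <<< 16 degenerates to swapping the 16-bit halves of each
     * lane: VREV32.16. */
    #define ROL32_16(x) \
            vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x)))

    /* One ChaCha quarterround over four blocks at once, one 32-bit
     * state word of each block per vector lane. */
    static inline void
    chacha_qr(uint32x4_t *a, uint32x4_t *b, uint32x4_t *c,
        uint32x4_t *d)
    {
            *a = vaddq_u32(*a, *b); *d = veorq_u32(*d, *a);
            *d = ROL32_16(*d);
            *c = vaddq_u32(*c, *d); *b = veorq_u32(*b, *c);
            *b = ROL32(*b, 12);
            *a = vaddq_u32(*a, *b); *d = veorq_u32(*d, *a);
            *d = ROL32(*d, 8);
            *c = vaddq_u32(*c, *d); *b = veorq_u32(*b, *c);
            *b = ROL32(*b, 7);
    }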