Source-Changes-HG archive


[src/trunk]: src/lib/libcrypto accelerate sha1 by using asm (i386). markus@o...



details:   https://anonhg.NetBSD.org/src/rev/65995fccf811
branches:  trunk
changeset: 555265:65995fccf811
user:      itojun <itojun%NetBSD.org@localhost>
date:      Thu Nov 13 02:10:00 2003 +0000

description:
accelerate sha1 by using asm (i386).  markus@openbsd.
there are internal symbol name changes, but they do not warrant a shlib minor
bump as the symbols are totally internal.
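
The speedup is wired in at compile time: -DSHA1_ASM (added below in
sha.inc) makes OpenSSL call the assembly block routine
sha1_block_asm_data_order from sha1_586.S instead of its portable C
block function, which is why only an internal symbol changes and the
shared library ABI is unaffected.  A minimal C sketch of that dispatch
pattern follows; only the name sha1_block_asm_data_order and its
(ctx, data, number-of-64-byte-blocks) argument order are taken from the
diff (the prologue shifts the third argument left by 6 and adds it to
the data pointer to form an end address), while the fallback name and
the trimmed-down state struct are illustrative assumptions.

    #include <stddef.h>
    #include <stdint.h>

    /* Chaining state only; the real SHA_CTX also carries the
     * partial-block buffer and length counters. */
    typedef struct {
            uint32_t h0, h1, h2, h3, h4;
    } SHA1_STATE;

    #ifdef SHA1_ASM
    /* Provided by sha1_586.S; processes num 64-byte blocks. */
    void sha1_block_asm_data_order(SHA1_STATE *c, const void *p, size_t num);
    #define sha1_block(c, p, n)     sha1_block_asm_data_order((c), (p), (n))
    #else
    /* Portable C implementation (hypothetical name). */
    void sha1_block_c_data_order(SHA1_STATE *c, const void *p, size_t num);
    #define sha1_block(c, p, n)     sha1_block_c_data_order((c), (p), (n))
    #endif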

diffstat:

 lib/libcrypto/arch/i386/sha.inc    |    13 +
 lib/libcrypto/arch/i386/sha1_586.S |  2004 ++++++++++++++++++++++++++++++++++++
 lib/libcrypto/sha.inc              |     4 +-
 3 files changed, 2019 insertions(+), 2 deletions(-)

diffs (truncated from 2040 to 300 lines):

diff -r 151d0fa3f845 -r 65995fccf811 lib/libcrypto/arch/i386/sha.inc
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libcrypto/arch/i386/sha.inc   Thu Nov 13 02:10:00 2003 +0000
@@ -0,0 +1,13 @@
+#      $NetBSD: sha.inc,v 1.1 2003/11/13 02:10:00 itojun Exp $
+#
+#      @(#) Copyright (c) 1995 Simon J. Gerraty
+#
+#      SRCS extracted from /home/current/src/lib/libcrypto/../../crypto/dist/openssl/crypto/sha/Makefile.ssl
+#
+
+.PATH: ${OPENSSLSRC}/crypto/sha
+.PATH: ${.CURDIR}/arch/i386
+
+CPPFLAGS+=     -I${OPENSSLSRC}/crypto/sha -DSHA1_ASM
+
+SRCS+=sha_dgst.c sha1dgst.c sha_one.c sha1_one.c sha1_586.S
diff -r 151d0fa3f845 -r 65995fccf811 lib/libcrypto/arch/i386/sha1_586.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libcrypto/arch/i386/sha1_586.S        Thu Nov 13 02:10:00 2003 +0000
@@ -0,0 +1,2004 @@
+/*     $NetBSD: sha1_586.S,v 1.1 2003/11/13 02:10:00 itojun Exp $      */
+
+/* Copyright (C) 1995-1998 Eric Young (eay%cryptsoft.com@localhost)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay%cryptsoft.com@localhost).
+ * The implementation was written so as to conform with Netscapes SSL.
+ * 
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh%cryptsoft.com@localhost).
+ * 
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay%cryptsoft.com@localhost)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from 
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh%cryptsoft.com@localhost)"
+ * 
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * 
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+/*
+ * Modified from the output of `perl sha1-586.pl elf' by
+ * Markus Friedl <markus%openbsd.org@localhost> and
+ * Jun-ichiro itojun Hagino <itojun%itojun.org@localhost>
+ */
+
+#include <machine/asm.h>
+
+ENTRY(sha1_block_asm_data_order)
+       movl    12(%esp),       %ecx
+       pushl   %esi
+       sall    $6,             %ecx
+       movl    12(%esp),       %esi
+       pushl   %ebp
+       addl    %esi,           %ecx
+       pushl   %ebx
+       movl    16(%esp),       %ebp
+       pushl   %edi
+       movl    12(%ebp),       %edx
+       subl    $108,           %esp
+       movl    16(%ebp),       %edi
+       movl    8(%ebp),        %ebx
+       movl    %ecx,           68(%esp)
+       /* First we need to setup the X array */
+L000start:
+       /* First, load the words onto the stack in network byte order */
+       movl    (%esi),         %eax
+       movl    4(%esi),        %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           (%esp)
+       movl    %ecx,           4(%esp)
+       movl    8(%esi),        %eax
+       movl    12(%esi),       %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           8(%esp)
+       movl    %ecx,           12(%esp)
+       movl    16(%esi),       %eax
+       movl    20(%esi),       %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           16(%esp)
+       movl    %ecx,           20(%esp)
+       movl    24(%esi),       %eax
+       movl    28(%esi),       %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           24(%esp)
+       movl    %ecx,           28(%esp)
+       movl    32(%esi),       %eax
+       movl    36(%esi),       %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           32(%esp)
+       movl    %ecx,           36(%esp)
+       movl    40(%esi),       %eax
+       movl    44(%esi),       %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           40(%esp)
+       movl    %ecx,           44(%esp)
+       movl    48(%esi),       %eax
+       movl    52(%esi),       %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           48(%esp)
+       movl    %ecx,           52(%esp)
+       movl    56(%esi),       %eax
+       movl    60(%esi),       %ecx
+.byte 15
+.byte 200              /* bswapl  %eax */
+.byte 15
+.byte 201              /* bswapl  %ecx */
+       movl    %eax,           56(%esp)
+       movl    %ecx,           60(%esp)
+       /* We now have the X array on the stack */
+       /* starting at sp-4 */
+       movl    %esi,           132(%esp)
+L001shortcut:
+
+       /* Start processing */
+       movl    (%ebp),         %eax
+       movl    4(%ebp),        %ecx
+       /* 00_15 0 */
+       movl    %ebx,           %esi
+       movl    %eax,           %ebp
+       xorl    %edx,           %esi
+       roll    $5,             %ebp
+       andl    %ecx,           %esi
+       addl    %edi,           %ebp
+.byte 209
+.byte 201              /* rorl $1 %ecx */
+       movl    (%esp),         %edi
+.byte 209
+.byte 201              /* rorl $1 %ecx */
+       xorl    %edx,           %esi
+       leal    1518500249(%ebp,%edi,1),%ebp
+       movl    %ecx,           %edi
+       addl    %ebp,           %esi
+       xorl    %ebx,           %edi
+       movl    %esi,           %ebp
+       andl    %eax,           %edi
+       roll    $5,             %ebp
+       addl    %edx,           %ebp
+       movl    4(%esp),        %edx
+.byte 209
+.byte 200              /* rorl $1 %eax */
+       xorl    %ebx,           %edi
+.byte 209
+.byte 200              /* rorl $1 %eax */
+       leal    1518500249(%ebp,%edx,1),%ebp
+       addl    %ebp,           %edi
+       /* 00_15 2 */
+       movl    %eax,           %edx
+       movl    %edi,           %ebp
+       xorl    %ecx,           %edx
+       roll    $5,             %ebp
+       andl    %esi,           %edx
+       addl    %ebx,           %ebp
+.byte 209
+.byte 206              /* rorl $1 %esi */
+       movl    8(%esp),        %ebx
+.byte 209
+.byte 206              /* rorl $1 %esi */
+       xorl    %ecx,           %edx
+       leal    1518500249(%ebp,%ebx,1),%ebp
+       movl    %esi,           %ebx
+       addl    %ebp,           %edx
+       xorl    %eax,           %ebx
+       movl    %edx,           %ebp
+       andl    %edi,           %ebx
+       roll    $5,             %ebp
+       addl    %ecx,           %ebp
+       movl    12(%esp),       %ecx
+.byte 209
+.byte 207              /* rorl $1 %edi */
+       xorl    %eax,           %ebx
+.byte 209
+.byte 207              /* rorl $1 %edi */
+       leal    1518500249(%ebp,%ecx,1),%ebp
+       addl    %ebp,           %ebx
+       /* 00_15 4 */
+       movl    %edi,           %ecx
+       movl    %ebx,           %ebp
+       xorl    %esi,           %ecx
+       roll    $5,             %ebp
+       andl    %edx,           %ecx
+       addl    %eax,           %ebp
+.byte 209
+.byte 202              /* rorl $1 %edx */
+       movl    16(%esp),       %eax
+.byte 209
+.byte 202              /* rorl $1 %edx */
+       xorl    %esi,           %ecx
+       leal    1518500249(%ebp,%eax,1),%ebp
+       movl    %edx,           %eax
+       addl    %ebp,           %ecx
+       xorl    %edi,           %eax
+       movl    %ecx,           %ebp
+       andl    %ebx,           %eax
+       roll    $5,             %ebp
+       addl    %esi,           %ebp
+       movl    20(%esp),       %esi
+.byte 209
+.byte 203              /* rorl $1 %ebx */
+       xorl    %edi,           %eax
+.byte 209
+.byte 203              /* rorl $1 %ebx */
+       leal    1518500249(%ebp,%esi,1),%ebp
+       addl    %ebp,           %eax
+       /* 00_15 6 */
+       movl    %ebx,           %esi
+       movl    %eax,           %ebp
+       xorl    %edx,           %esi
+       roll    $5,             %ebp
+       andl    %ecx,           %esi
+       addl    %edi,           %ebp
+.byte 209
+.byte 201              /* rorl $1 %ecx */
+       movl    24(%esp),       %edi
+.byte 209
+.byte 201              /* rorl $1 %ecx */
+       xorl    %edx,           %esi
+       leal    1518500249(%ebp,%edi,1),%ebp
+       movl    %ecx,           %edi
+       addl    %ebp,           %esi
+       xorl    %ebx,           %edi
+       movl    %esi,           %ebp
+       andl    %eax,           %edi
+       roll    $5,             %ebp
+       addl    %edx,           %ebp
+       movl    28(%esp),       %edx
+.byte 209
+.byte 200              /* rorl $1 %eax */
+       xorl    %ebx,           %edi
+.byte 209
+.byte 200              /* rorl $1 %eax */
+       leal    1518500249(%ebp,%edx,1),%ebp
+       addl    %ebp,           %edi
+       /* 00_15 8 */
+       movl    %eax,           %edx
+       movl    %edi,           %ebp
+       xorl    %ecx,           %edx
+       roll    $5,             %ebp
+       andl    %esi,           %edx


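In the assembly above, the `.byte 15' / `.byte 200' pairs are the raw
opcode bytes for bswapl %eax (0x0f 0xc8; 0x0f 0xc9 for %ecx), and the
`.byte 209' pairs encode rorl $1 on the named register, presumably
spelled out as literal bytes for the benefit of older assemblers.  The
decimal constant 1518500249 is 0x5a827999, the SHA-1 round constant for
rounds 0-19, and the xorl/andl/xorl sequence computes the choice
function as ((c ^ d) & b) ^ d, which equals the textbook
(b & c) | (~b & d).  A rough, illustrative C equivalent of the portion
shown (big-endian word load plus rounds 0-15; the message-schedule
expansion used by later rounds is omitted):

    #include <stdint.h>

    #define ROL32(x, n)     (((x) << (n)) | ((x) >> (32 - (n))))

    /* Sketch only, not the patch's code: load one 64-byte block as 16
     * big-endian words (what the bswapl pairs do) and run rounds 0-15
     * on the working variables a..e. */
    static void
    sha1_rounds_0_15(uint32_t h[5], const unsigned char block[64])
    {
            uint32_t X[16];
            uint32_t a = h[0], b = h[1], c = h[2], d = h[3], e = h[4];
            int i;

            for (i = 0; i < 16; i++)        /* network byte order */
                    X[i] = (uint32_t)block[4 * i] << 24 |
                        (uint32_t)block[4 * i + 1] << 16 |
                        (uint32_t)block[4 * i + 2] << 8 |
                        (uint32_t)block[4 * i + 3];

            for (i = 0; i < 16; i++) {
                    uint32_t f = ((c ^ d) & b) ^ d;
                    uint32_t t = ROL32(a, 5) + f + e + X[i] + 0x5a827999U;

                    e = d;
                    d = c;
                    c = ROL32(b, 30);       /* the two rorl $1 steps */
                    b = a;
                    a = t;
            }

            /* Write back the working state; the real routine keeps
             * going through round 79 and only then adds into the
             * chaining values. */
            h[0] = a; h[1] = b; h[2] = c; h[3] = d; h[4] = e;
    }

The rest of sha1_586.S is this loop and the remaining three round
groups fully unrolled, with the 16-word schedule kept in the stack
frame reserved by the subl $108, %esp near the top.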
