Source-Changes-HG archive

[src/trunk]: src/lib/libcrypto/arch/i386 Add support for building the assembly version of RMD160 from OpenSSL



details:   https://anonhg.NetBSD.org/src/rev/1b35c80d9306
branches:  trunk
changeset: 495569:1b35c80d9306
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Mon Jul 31 19:22:04 2000 +0000

description:
Add support for building the assembly version of RMD160 from OpenSSL.

Before:
Doing rmd160 for 3s on 8 size blocks: 778828 rmd160's in 3.00s
Doing rmd160 for 3s on 64 size blocks: 430214 rmd160's in 3.00s
Doing rmd160 for 3s on 256 size blocks: 182108 rmd160's in 3.00s
Doing rmd160 for 3s on 1024 size blocks: 55050 rmd160's in 3.00s
Doing rmd160 for 3s on 8192 size blocks: 7339 rmd160's in 3.00s
type              8 bytes     64 bytes    256 bytes   1024 bytes   8192 bytes
rmd160            2076.87k     9177.90k    15539.88k    18790.40k    20040.36k

After:
Doing rmd160 for 3s on 8 size blocks: 1084941 rmd160's in 3.00s
Doing rmd160 for 3s on 64 size blocks: 617966 rmd160's in 3.00s
Doing rmd160 for 3s on 256 size blocks: 267381 rmd160's in 2.99s
Doing rmd160 for 3s on 1024 size blocks: 82001 rmd160's in 3.00s
Doing rmd160 for 3s on 8192 size blocks: 10974 rmd160's in 3.00s
type              8 bytes     64 bytes    256 bytes   1024 bytes   8192 bytes
rmd160            2893.18k    13183.27k    22892.82k    27989.67k    29966.34k
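(The figures above are output from OpenSSL's "openssl speed rmd160" benchmark; the throughput columns are bytes hashed per second, e.g. 1084941 blocks x 8 bytes / 3.00 s = 2893.18k. At 8192-byte blocks the assembly version is roughly 1.5x faster: 29966.34k vs. 20040.36k. For orientation, here is a minimal C sketch of the API being benchmarked -- the incremental interface lives in rmd_dgst.c and the one-shot in rmd_one.c, both listed in SRCS below. Error checking is omitted and the message is illustrative only.)

#include <stdio.h>
#include <string.h>
#include <openssl/ripemd.h>

int
main(void)
{
	static const unsigned char msg[] = "abc";
	unsigned char md[RIPEMD160_DIGEST_LENGTH];
	RIPEMD160_CTX ctx;
	int i;

	/* Incremental interface (rmd_dgst.c); its block compression
	 * step is what rmd_dgst_586.S replaces when RMD160_ASM is
	 * defined. */
	RIPEMD160_Init(&ctx);
	RIPEMD160_Update(&ctx, msg, strlen((const char *)msg));
	RIPEMD160_Final(md, &ctx);

	/* One-shot interface (rmd_one.c) computes the same digest. */
	RIPEMD160(msg, strlen((const char *)msg), md);

	for (i = 0; i < RIPEMD160_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");
	return 0;
}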

diffstat:

 lib/libcrypto/arch/i386/ripemd.inc     |    14 +
 lib/libcrypto/arch/i386/rmd_dgst_586.S |  2024 ++++++++++++++++++++++++++++++++
 2 files changed, 2038 insertions(+), 0 deletions(-)

diffs (truncated from 2046 to 300 lines):

diff -r 436901db62a5 -r 1b35c80d9306 lib/libcrypto/arch/i386/ripemd.inc
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libcrypto/arch/i386/ripemd.inc        Mon Jul 31 19:22:04 2000 +0000
@@ -0,0 +1,14 @@
+#      $NetBSD: ripemd.inc,v 1.1 2000/07/31 19:22:04 thorpej Exp $
+#
+#      @(#) Copyright (c) 1995 Simon J. Gerraty
+#
+#      SRCS extracted from /home/current/src/lib/libcrypto/../../crypto/dist/openssl/crypto/ripemd/Makefile.ssl
+#
+
+.PATH: ${OPENSSLSRC}/crypto/ripemd
+.PATH: ${.CURDIR}/arch/i386
+
+CPPFLAGS+=     -I${OPENSSLSRC}/crypto/ripemd
+CPPFLAGS+=     -DRMD160_ASM
+
+SRCS+=rmd_dgst.c rmd_dgst_586.S rmd_one.c
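(This fragment is the whole build glue: it puts the OpenSSL ripemd sources on the search path, compiles rmd_dgst_586.S alongside the C files, and defines RMD160_ASM so that OpenSSL's C digest code dispatches to the assembly compression routine. The selection happens in OpenSSL's internal rmd_locl.h header along these lines -- a paraphrased sketch, not the verbatim header, and the exact guard macros may differ:)

/* With -DRMD160_ASM on i386, the generic C block routine is aliased
 * to the hand-written one that rmd_dgst_586.S exports. */
#if defined(RMD160_ASM) && (defined(__i386) || defined(__i386__))
# define ripemd160_block_host_order ripemd160_block_asm_host_order
#endif

/* The routine defined below in rmd_dgst_586.S: context, pointer to
 * one or more 64-byte message blocks, block count. */
void ripemd160_block_asm_host_order(RIPEMD160_CTX *ctx,
    const void *data, int num);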
diff -r 436901db62a5 -r 1b35c80d9306 lib/libcrypto/arch/i386/rmd_dgst_586.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libcrypto/arch/i386/rmd_dgst_586.S    Mon Jul 31 19:22:04 2000 +0000
@@ -0,0 +1,2024 @@
+/*     $NetBSD: rmd_dgst_586.S,v 1.1 2000/07/31 19:22:04 thorpej Exp $ */
+
+/* Copyright (C) 1995-1998 Eric Young (eay%cryptsoft.com@localhost)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay%cryptsoft.com@localhost).
+ * The implementation was written so as to conform with Netscapes SSL.
+ * 
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh%cryptsoft.com@localhost).
+ * 
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay%cryptsoft.com@localhost)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from 
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh%cryptsoft.com@localhost)"
+ * 
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * 
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+/*
+ * Modified from the output of `perl rmd-586.pl elf' by
+ * Jason R. Thorpe <thorpej%zembu.com@localhost>.
+ */
+
+#include <machine/asm.h>
+
+ENTRY(ripemd160_block_asm_host_order)
+       movl    4(%esp),        %edx
+       movl    8(%esp),        %eax
+       pushl   %esi
+       movl    (%edx),         %ecx
+       pushl   %edi
+       movl    4(%edx),        %esi
+       pushl   %ebp
+       movl    8(%edx),        %edi
+       pushl   %ebx
+       subl    $108,           %esp
+L000start:
+
+       movl    (%eax),         %ebx
+       movl    4(%eax),        %ebp
+       movl    %ebx,           (%esp)
+       movl    %ebp,           4(%esp)
+       movl    8(%eax),        %ebx
+       movl    12(%eax),       %ebp
+       movl    %ebx,           8(%esp)
+       movl    %ebp,           12(%esp)
+       movl    16(%eax),       %ebx
+       movl    20(%eax),       %ebp
+       movl    %ebx,           16(%esp)
+       movl    %ebp,           20(%esp)
+       movl    24(%eax),       %ebx
+       movl    28(%eax),       %ebp
+       movl    %ebx,           24(%esp)
+       movl    %ebp,           28(%esp)
+       movl    32(%eax),       %ebx
+       movl    36(%eax),       %ebp
+       movl    %ebx,           32(%esp)
+       movl    %ebp,           36(%esp)
+       movl    40(%eax),       %ebx
+       movl    44(%eax),       %ebp
+       movl    %ebx,           40(%esp)
+       movl    %ebp,           44(%esp)
+       movl    48(%eax),       %ebx
+       movl    52(%eax),       %ebp
+       movl    %ebx,           48(%esp)
+       movl    %ebp,           52(%esp)
+       movl    56(%eax),       %ebx
+       movl    60(%eax),       %ebp
+       movl    %ebx,           56(%esp)
+       movl    %ebp,           60(%esp)
+       movl    %edi,           %eax
+       movl    12(%edx),       %ebx
+       movl    16(%edx),       %ebp
+       /* 0 */
+       xorl    %ebx,           %eax
+       movl    (%esp),         %edx
+       xorl    %esi,           %eax
+       addl    %edx,           %ecx
+       roll    $10,            %edi
+       addl    %eax,           %ecx
+       movl    %esi,           %eax
+       roll    $11,            %ecx
+       addl    %ebp,           %ecx
+       /* 1 */
+       xorl    %edi,           %eax
+       movl    4(%esp),        %edx
+       xorl    %ecx,           %eax
+       addl    %eax,           %ebp
+       movl    %ecx,           %eax
+       roll    $10,            %esi
+       addl    %edx,           %ebp
+       xorl    %esi,           %eax
+       roll    $14,            %ebp
+       addl    %ebx,           %ebp
+       /* 2 */
+       movl    8(%esp),        %edx
+       xorl    %ebp,           %eax
+       addl    %edx,           %ebx
+       roll    $10,            %ecx
+       addl    %eax,           %ebx
+       movl    %ebp,           %eax
+       roll    $15,            %ebx
+       addl    %edi,           %ebx
+       /* 3 */
+       xorl    %ecx,           %eax
+       movl    12(%esp),       %edx
+       xorl    %ebx,           %eax
+       addl    %eax,           %edi
+       movl    %ebx,           %eax
+       roll    $10,            %ebp
+       addl    %edx,           %edi
+       xorl    %ebp,           %eax
+       roll    $12,            %edi
+       addl    %esi,           %edi
+       /* 4 */
+       movl    16(%esp),       %edx
+       xorl    %edi,           %eax
+       addl    %edx,           %esi
+       roll    $10,            %ebx
+       addl    %eax,           %esi
+       movl    %edi,           %eax
+       roll    $5,             %esi
+       addl    %ecx,           %esi
+       /* 5 */
+       xorl    %ebx,           %eax
+       movl    20(%esp),       %edx
+       xorl    %esi,           %eax
+       addl    %eax,           %ecx
+       movl    %esi,           %eax
+       roll    $10,            %edi
+       addl    %edx,           %ecx
+       xorl    %edi,           %eax
+       roll    $8,             %ecx
+       addl    %ebp,           %ecx
+       /* 6 */
+       movl    24(%esp),       %edx
+       xorl    %ecx,           %eax
+       addl    %edx,           %ebp
+       roll    $10,            %esi
+       addl    %eax,           %ebp
+       movl    %ecx,           %eax
+       roll    $7,             %ebp
+       addl    %ebx,           %ebp
+       /* 7 */
+       xorl    %esi,           %eax
+       movl    28(%esp),       %edx
+       xorl    %ebp,           %eax
+       addl    %eax,           %ebx
+       movl    %ebp,           %eax
+       roll    $10,            %ecx
+       addl    %edx,           %ebx
+       xorl    %ecx,           %eax
+       roll    $9,             %ebx
+       addl    %edi,           %ebx
+       /* 8 */
+       movl    32(%esp),       %edx
+       xorl    %ebx,           %eax
+       addl    %edx,           %edi
+       roll    $10,            %ebp
+       addl    %eax,           %edi
+       movl    %ebx,           %eax
+       roll    $11,            %edi
+       addl    %esi,           %edi
+       /* 9 */
+       xorl    %ebp,           %eax
+       movl    36(%esp),       %edx
+       xorl    %edi,           %eax
+       addl    %eax,           %esi
+       movl    %edi,           %eax
+       roll    $10,            %ebx
+       addl    %edx,           %esi
+       xorl    %ebx,           %eax
+       roll    $13,            %esi
+       addl    %ecx,           %esi
+       /* 10 */
+       movl    40(%esp),       %edx
+       xorl    %esi,           %eax
+       addl    %edx,           %ecx
+       roll    $10,            %edi
+       addl    %eax,           %ecx
+       movl    %esi,           %eax
+       roll    $14,            %ecx
+       addl    %ebp,           %ecx
+       /* 11 */
+       xorl    %edi,           %eax
+       movl    44(%esp),       %edx
+       xorl    %ecx,           %eax
+       addl    %eax,           %ebp
+       movl    %ecx,           %eax
+       roll    $10,            %esi
+       addl    %edx,           %ebp
+       xorl    %esi,           %eax
+       roll    $15,            %ebp
+       addl    %ebx,           %ebp
+       /* 12 */
+       movl    48(%esp),       %edx
+       xorl    %ebp,           %eax
+       addl    %edx,           %ebx
+       roll    $10,            %ecx
+       addl    %eax,           %ebx
+       movl    %ebp,           %eax
+       roll    $6,             %ebx
+       addl    %edi,           %ebx
+       /* 13 */
+       xorl    %ecx,           %eax
+       movl    52(%esp),       %edx
+       xorl    %ebx,           %eax
+       addl    %eax,           %edi
+       movl    %ebx,           %eax
+       roll    $10,            %ebp
+       addl    %edx,           %edi
+       xorl    %ebp,           %eax
+       roll    $7,             %edi
+       addl    %esi,           %edi
+       /* 14 */
+       movl    56(%esp),       %edx
+       xorl    %edi,           %eax
+       addl    %edx,           %esi
+       roll    $10,            %ebx
+       addl    %eax,           %esi
+       movl    %edi,           %eax
+       roll    $9,             %esi
+       addl    %ecx,           %esi
+       /* 15 */
+       xorl    %ebx,           %eax
+       movl    60(%esp),       %edx
+       xorl    %esi,           %eax
+       addl    %eax,           %ecx
+       movl    $-1,            %eax
+       roll    $10,            %edi
+       addl    %edx,           %ecx
+       movl    28(%esp),       %edx
+       roll    $8,             %ecx
+       addl    %ebp,           %ecx
+       /* 16 */
+       addl    %edx,           %ebp
+       movl    %esi,           %edx


