diff options
author | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2013-08-31 12:48:31 +0300 |
---|---|---|
committer | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2013-08-31 13:31:34 +0300 |
commit | 99d15543b8d94a8f1ef66c6ccb862b0ce82c514d (patch) | |
tree | 1aa148d6c41647926f23607d7851a8d1e3f33aff | |
parent | 03da7f8ba3ec24d4639a2bcebbc0d9d831734c08 (diff) | |
download | libgcrypt-99d15543b8d94a8f1ef66c6ccb862b0ce82c514d.tar.gz |
sha512: add ARM/NEON assembly version of transform function
* cipher/Makefile.am: Add 'sha512-armv7-neon.S'.
* cipher/sha512-armv7-neon.S: New file.
* cipher/sha512.c (USE_ARM_NEON_ASM): New macro.
(SHA512_CONTEXT) [USE_ARM_NEON_ASM]: Add 'use_neon'.
(sha512_init, sha384_init) [USE_ARM_NEON_ASM]: Enable 'use_neon' if
CPU supports NEON instructions.
(k): Round constant array moved outside of 'transform' function.
(__transform): Renamed from 'transform' function.
[USE_ARM_NEON_ASM] (_gcry_sha512_transform_armv7_neon): New prototype.
(transform): New wrapper function for different transform versions.
(sha512_write, sha512_final): Burn stack by the amount returned by
transform function.
* configure.ac (sha512) [neonsupport]: Add 'sha512-armv7-neon.lo'.
--
Add NEON assembly for transform function for faster SHA512 on ARM. Major speed
up thanks to 64-bit integer registers and large register file that can hold
full input buffer.
Benchmark results on Cortex-A8, 1 GHz:
Old:
$ tests/benchmark --hash-repetitions 100 md sha512 sha384
SHA512 17050ms 18780ms 29120ms 18040ms 17190ms
SHA384 17130ms 18720ms 29160ms 18090ms 17280ms
New:
$ tests/benchmark --hash-repetitions 100 md sha512 sha384
SHA512 3600ms 5070ms 15330ms 4510ms 3480ms
SHA384 3590ms 5060ms 15350ms 4510ms 3520ms
New vs old:
SHA512 4.74x 3.70x 1.90x 4.00x 4.94x
SHA384 4.77x 3.70x 1.90x 4.01x 4.91x
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
-rw-r--r-- | cipher/Makefile.am | 2 | ||||
-rw-r--r-- | cipher/sha512-armv7-neon.S | 316 | ||||
-rw-r--r-- | cipher/sha512.c | 150 | ||||
-rw-r--r-- | configure.ac | 5 |
4 files changed, 422 insertions, 51 deletions
diff --git a/cipher/Makefile.am b/cipher/Makefile.am index e233e79f..3dd6f88f 100644 --- a/cipher/Makefile.am +++ b/cipher/Makefile.am @@ -73,7 +73,7 @@ seed.c \ serpent.c serpent-sse2-amd64.S serpent-avx2-amd64.S \ sha1.c \ sha256.c \ -sha512.c \ +sha512.c sha512-armv7-neon.S \ tiger.c \ whirlpool.c \ twofish.c twofish-amd64.S \ diff --git a/cipher/sha512-armv7-neon.S b/cipher/sha512-armv7-neon.S new file mode 100644 index 00000000..042b15a6 --- /dev/null +++ b/cipher/sha512-armv7-neon.S @@ -0,0 +1,316 @@ +/* sha512-armv7-neon.S - ARM/NEON assembly implementation of SHA-512 transform + * + * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <config.h> + +#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \ + defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \ + defined(HAVE_GCC_INLINE_ASM_NEON) + +.text + +.syntax unified +.fpu neon +.arm + +/* structure of SHA512_CONTEXT */ +#define hd_a 0 +#define hd_b ((hd_a) + 8) +#define hd_c ((hd_b) + 8) +#define hd_d ((hd_c) + 8) +#define hd_e ((hd_d) + 8) +#define hd_f ((hd_e) + 8) +#define hd_g ((hd_f) + 8) + +/* register macros */ +#define RK %r2 + +#define RA d0 +#define RB d1 +#define RC d2 +#define RD d3 +#define RE d4 +#define RF d5 +#define RG d6 +#define RH d7 + +#define RT0 d8 +#define RT1 d9 +#define RT2 d10 +#define RT3 d11 +#define RT4 d12 +#define RT5 d13 +#define RT6 d14 +#define RT7 d15 + +#define RW0 d16 +#define RW1 d17 +#define RW2 d18 +#define RW3 d19 +#define RW4 d20 +#define RW5 d21 +#define RW6 d22 +#define RW7 d23 +#define RW8 d24 +#define RW9 d25 +#define RW10 d26 +#define RW11 d27 +#define RW12 d28 +#define RW13 d29 +#define RW14 d30 +#define RW15 d31 + +#define RW01q q8 +#define RW23q q9 +#define RW45q q10 +#define RW67q q11 +#define RW89q q12 +#define RW1011q q13 +#define RW1213q q14 +#define RW1415q q15 + +/*********************************************************************** + * ARM assembly implementation of sha512 transform + ***********************************************************************/ +#define round_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw14, rw9, rw1) \ + /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \ + vshr.u64 RT1, re, #14; \ + vshl.u64 RT3, re, #64 - 14; \ + vshr.u64 RT4, re, #18; \ + vshl.u64 RT5, re, #64 - 18; \ + veor.64 RT1, RT1, RT3; \ + vld1.64 {RT0}, [RK]!; \ + veor.64 RT1, RT1, RT4; \ + vshr.u64 RT3, re, #41; \ + vshl.u64 RT4, re, #64 - 41; \ + veor.64 RT1, RT1, RT5; \ + vadd.u64 RT0, RT0, rw0; \ + veor.64 RT1, RT1, RT3; \ + vand.64 RT2, re, rf; \ + veor.64 RT1, RT1, RT4; \ + vbic.64 RT6, rg, re; \ + \ + vadd.u64 RT1, RT1, rh; \ + veor.64 RT2, RT2, RT6; \ + vshr.u64 rh, 
ra, #28; \ + vshl.u64 RT3, ra, #64 - 28; \ + vadd.u64 RT1, RT1, RT0; \ + vshr.u64 RT4, ra, #34; \ + veor.64 rh, rh, RT3; \ + vshl.u64 RT5, ra, #64 - 34; \ + vadd.u64 RT1, RT1, RT2; \ + \ + /* h = Sum0 (a) + Maj (a, b, c); */ \ + veor.64 rh, rh, RT4; \ + vshr.u64 RT3, ra, #39; \ + vshl.u64 RT4, ra, #64 - 39; \ + vorr.64 RT6, ra, rb; \ + vand.64 RT0, ra, rb; \ + veor.64 rh, rh, RT5; \ + vand.64 RT6, RT6, rc; \ + veor.64 rh, rh, RT3; \ + vorr.64 RT0, RT0, RT6; \ + veor.64 rh, rh, RT4; \ + vshr.u64 RT4, rw14, #19; \ + vadd.u64 rh, rh, RT0; \ + vshl.u64 RT2, rw14, #64 - 19; \ + \ + /* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \ + vshr.u64 RT3, rw14, #61; \ + vshl.u64 RT6, rw14, #64 - 61; \ + veor.64 RT0, RT4, RT2; \ + vshr.u64 RT2, rw14, 6; \ + veor.64 RT0, RT0, RT3; \ + vshr.u64 RT7, rw1, #1; \ + veor.64 RT0, RT0, RT6; \ + vshl.u64 RT4, rw1, #64 - 1; \ + veor.64 RT0, RT0, RT2; \ + vshr.u64 RT5, rw1, #8; \ + vadd.u64 rw0, rw0, RT0; \ + vshl.u64 RT6, rw1, #64 - 8; \ + veor.64 RT7, RT7, RT4; \ + vshr.u64 RT4, rw1, 7; \ + veor.64 RT7, RT7, RT5; \ + vadd.u64 rw0, rw0, rw9; /* w[0]+=w[9]; */\ + veor.64 RT7, RT7, RT6; \ + vadd.u64 rd, rd, RT1; /* d+=t1; */ \ + veor.64 RT7, RT7, RT4; \ + vadd.u64 rh, rh, RT1; /* h+=t1; */ \ + vadd.u64 rw0, rw0, RT7; \ + +#define round_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0) \ + /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \ + vld1.64 {RT0}, [RK]!; \ + vshr.u64 RT1, re, #14; \ + vshl.u64 RT3, re, #64 - 14; \ + vshr.u64 RT4, re, #18; \ + vshl.u64 RT5, re, #64 - 18; \ + veor.64 RT1, RT1, RT3; \ + vshr.u64 RT7, ra, #28; \ + veor.64 RT1, RT1, RT4; \ + vshr.u64 RT3, re, #41; \ + vshl.u64 RT4, re, #64 - 41; \ + veor.64 RT1, RT1, RT5; \ + vadd.u64 RT0, RT0, rw0; \ + veor.64 RT1, RT1, RT3; \ + vand.64 RT2, re, rf; \ + veor.64 RT1, RT1, RT4; \ + vbic.64 RT6, rg, re; \ + \ + vadd.u64 RT1, RT1, rh; \ + veor.64 RT2, RT2, RT6; \ + vadd.u64 RT1, RT1, RT0; \ + vshr.u64 RT4, ra, #34; \ + vshl.u64 RT5, ra, #64 - 34; \ + \ + /* t7 = Sum0 (a) + Maj 
(a, b, c); */ \ + vshl.u64 RT6, ra, #64 - 28; \ + veor.64 RT7, RT7, RT4; \ + vshr.u64 RT3, ra, #39; \ + veor.64 RT7, RT7, RT6; \ + vshl.u64 RT4, ra, #64 - 39; \ + vorr.64 RT6, ra, rb; \ + vand.64 RT0, ra, rb; \ + veor.64 RT7, RT7, RT5; \ + vand.64 RT6, RT6, rc; \ + veor.64 RT7, RT7, RT3; \ + vorr.64 RT0, RT0, RT6; \ + veor.64 RT7, RT7, RT4; \ + vadd.u64 RT1, RT1, RT2; \ + vadd.u64 RT7, RT7, RT0; \ + vadd.u64 rd, rd, RT1; /* d+=t1; */ \ + vadd.u64 rh, RT7, RT1; /* h=t7+t1; */ + +.align 3 +.globl _gcry_sha512_transform_armv7_neon +.type _gcry_sha512_transform_armv7_neon,%function; + +_gcry_sha512_transform_armv7_neon: + /* Input: + * %r0: SHA512_CONTEXT + * %r1: data + * %r2: u64 k[] constants + */ + mov %r3, #0; + + /* Load context to d0-d7 */ + vld1.64 {RA-RD}, [%r0]!; + vld1.64 {RE-RH}, [%r0]; + sub %r0, #(4*8); + + /* Load input to w[16], d16-d31 */ + /* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */ + vld1.64 {RW0-RW3}, [%r1]!; + vld1.64 {RW4-RW7}, [%r1]!; + vld1.64 {RW8-RW11}, [%r1]!; + vld1.64 {RW12-RW15}, [%r1]; +#ifdef __ARMEL__ + /* byteswap */ + vrev64.8 RW01q, RW01q; + vrev64.8 RW23q, RW23q; + vrev64.8 RW45q, RW45q; + vrev64.8 RW67q, RW67q; + vrev64.8 RW89q, RW89q; + vrev64.8 RW1011q, RW1011q; + vrev64.8 RW1213q, RW1213q; + vrev64.8 RW1415q, RW1415q; +#endif + + /* EABI says that d8-d15 must be preserved by callee. 
*/ + vpush {RT0-RT7}; + +.Loop: + add %r3, #16; + round_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW14, RW9, RW1); + cmp %r3, #64; + round_0_63(RH, RA, RB, RC, RD, RE, RF, RG, RW1, RW15, RW10, RW2); + round_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW0, RW11, RW3); + round_0_63(RF, RG, RH, RA, RB, RC, RD, RE, RW3, RW1, RW12, RW4); + round_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW2, RW13, RW5); + round_0_63(RD, RE, RF, RG, RH, RA, RB, RC, RW5, RW3, RW14, RW6); + round_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW4, RW15, RW7); + round_0_63(RB, RC, RD, RE, RF, RG, RH, RA, RW7, RW5, RW0, RW8); + round_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW6, RW1, RW9); + round_0_63(RH, RA, RB, RC, RD, RE, RF, RG, RW9, RW7, RW2, RW10); + round_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW8, RW3, RW11); + round_0_63(RF, RG, RH, RA, RB, RC, RD, RE, RW11, RW9, RW4, RW12); + round_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW10, RW5, RW13); + round_0_63(RD, RE, RF, RG, RH, RA, RB, RC, RW13, RW11, RW6, RW14); + round_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW12, RW7, RW15); + round_0_63(RB, RC, RD, RE, RF, RG, RH, RA, RW15, RW13, RW8, RW0); + bne .Loop; + + round_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0); + round_64_79(RH, RA, RB, RC, RD, RE, RF, RG, RW1); + round_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2); + round_64_79(RF, RG, RH, RA, RB, RC, RD, RE, RW3); + round_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4); + round_64_79(RD, RE, RF, RG, RH, RA, RB, RC, RW5); + round_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6); + round_64_79(RB, RC, RD, RE, RF, RG, RH, RA, RW7); + round_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8); + round_64_79(RH, RA, RB, RC, RD, RE, RF, RG, RW9); + round_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10); + round_64_79(RF, RG, RH, RA, RB, RC, RD, RE, RW11); + round_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12); + round_64_79(RD, RE, RF, RG, RH, RA, RB, RC, RW13); + round_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14); + round_64_79(RB, RC, RD, RE, RF, RG, RH, RA, RW15); + 
+ /* Load context to d16-d23 */ + vld1.64 {RW0-RW3}, [%r0]!; + vld1.64 {RW4-RW7}, [%r0]; + sub %r0, #(4*8); + + vadd.u64 RA, RW0; + vadd.u64 RB, RW1; + vadd.u64 RC, RW2; + vadd.u64 RD, RW3; + vadd.u64 RE, RW4; + vadd.u64 RF, RW5; + vadd.u64 RG, RW6; + vadd.u64 RH, RW7; + + /* Store the first half of context */ + vst1.64 {RA-RD}, [%r0]!; + + /* Clear used registers */ + /* d16-d31 */ + veor.u64 RW01q, RW01q; + veor.u64 RW23q, RW23q; + veor.u64 RW45q, RW45q; + veor.u64 RW67q, RW67q; + vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */ + veor.u64 RW89q, RW89q; + veor.u64 RW1011q, RW1011q; + veor.u64 RW1213q, RW1213q; + veor.u64 RW1415q, RW1415q; + /* d8-d15 */ + vpop {RT0-RT7}; + /* d0-d7 (q0-q3) */ + veor.u64 %q0, %q0; + veor.u64 %q1, %q1; + veor.u64 %q2, %q2; + veor.u64 %q3, %q3; + + bx %lr; +.size _gcry_sha512_transform_armv7_neon,.-_gcry_sha512_transform_armv7_neon; + +#endif diff --git a/cipher/sha512.c b/cipher/sha512.c index 1bbcd111..fee3e713 100644 --- a/cipher/sha512.c +++ b/cipher/sha512.c @@ -53,12 +53,26 @@ #include "cipher.h" #include "hash-common.h" + +/* USE_ARM_NEON_ASM indicates whether to enable ARM NEON assembly code. 
*/ +#undef USE_ARM_NEON_ASM +#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) +# if defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \ + defined(HAVE_GCC_INLINE_ASM_NEON) +# define USE_ARM_NEON_ASM 1 +# endif +#endif + + typedef struct { u64 h0, h1, h2, h3, h4, h5, h6, h7; u64 nblocks; byte buf[128]; int count; +#ifdef USE_ARM_NEON_ASM + int use_neon; +#endif } SHA512_CONTEXT; static void @@ -77,6 +91,9 @@ sha512_init (void *context) hd->nblocks = 0; hd->count = 0; +#ifdef USE_ARM_NEON_ASM + hd->use_neon = (_gcry_get_hw_features () & HWF_ARM_NEON) != 0; +#endif } static void @@ -95,6 +112,9 @@ sha384_init (void *context) hd->nblocks = 0; hd->count = 0; +#ifdef USE_ARM_NEON_ASM + hd->use_neon = (_gcry_get_hw_features () & HWF_ARM_NEON) != 0; +#endif } @@ -128,58 +148,59 @@ Sum1 (u64 x) return (ROTR (x, 14) ^ ROTR (x, 18) ^ ROTR (x, 41)); } +static const u64 k[] = + { + U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd), + U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc), + U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019), + U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118), + U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe), + U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2), + U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1), + U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694), + U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3), + U64_C(0x0fc19dc68b8cd5b5), U64_C(0x240ca1cc77ac9c65), + U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483), + U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5), + U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210), + U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4), + U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725), + U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70), + U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926), + U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df), + U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8), + U64_C(0x81c2c92e47edaee6), 
U64_C(0x92722c851482353b), + U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001), + U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30), + U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910), + U64_C(0xf40e35855771202a), U64_C(0x106aa07032bbd1b8), + U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53), + U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8), + U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb), + U64_C(0x5b9cca4f7763e373), U64_C(0x682e6ff3d6b2b8a3), + U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60), + U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec), + U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9), + U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b), + U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207), + U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178), + U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6), + U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b), + U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493), + U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c), + U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a), + U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817) + }; + /**************** * Transform the message W which consists of 16 64-bit-words */ static void -transform (SHA512_CONTEXT *hd, const unsigned char *data) +__transform (SHA512_CONTEXT *hd, const unsigned char *data) { u64 a, b, c, d, e, f, g, h; u64 w[16]; int t; - static const u64 k[] = - { - U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd), - U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc), - U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019), - U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118), - U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe), - U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2), - U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1), - U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694), - U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3), - U64_C(0x0fc19dc68b8cd5b5), 
U64_C(0x240ca1cc77ac9c65), - U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483), - U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5), - U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210), - U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4), - U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725), - U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70), - U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926), - U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df), - U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8), - U64_C(0x81c2c92e47edaee6), U64_C(0x92722c851482353b), - U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001), - U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30), - U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910), - U64_C(0xf40e35855771202a), U64_C(0x106aa07032bbd1b8), - U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53), - U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8), - U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb), - U64_C(0x5b9cca4f7763e373), U64_C(0x682e6ff3d6b2b8a3), - U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60), - U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec), - U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9), - U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b), - U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207), - U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178), - U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6), - U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b), - U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493), - U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c), - U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a), - U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817) - }; /* get values from the chaining vars */ a = hd->h0; @@ -455,6 +476,33 @@ transform (SHA512_CONTEXT *hd, const unsigned char *data) } +#ifdef USE_ARM_NEON_ASM +void _gcry_sha512_transform_armv7_neon (SHA512_CONTEXT *hd, + const unsigned char *data, + const u64 k[]); +#endif + + +static 
unsigned int +transform (SHA512_CONTEXT *hd, const unsigned char *data) +{ +#ifdef USE_ARM_NEON_ASM + if (hd->use_neon) + { + _gcry_sha512_transform_armv7_neon(hd, data, k); + + /* return stack burn depth */ + return (sizeof(void *) * 3); + } +#endif + + __transform (hd, data); + + /* return stack burn depth */ + return 256; +} + + /* Update the message digest with the contents * of INBUF with length INLEN. */ @@ -463,11 +511,12 @@ sha512_write (void *context, const void *inbuf_arg, size_t inlen) { const unsigned char *inbuf = inbuf_arg; SHA512_CONTEXT *hd = context; + unsigned int stack_burn_depth = 0; if (hd->count == 128) { /* flush the buffer */ - transform (hd, hd->buf); - _gcry_burn_stack (256); + stack_burn_depth = transform (hd, hd->buf); + _gcry_burn_stack (stack_burn_depth); hd->count = 0; hd->nblocks++; } @@ -484,13 +533,13 @@ sha512_write (void *context, const void *inbuf_arg, size_t inlen) while (inlen >= 128) { - transform (hd, inbuf); + stack_burn_depth = transform (hd, inbuf); hd->count = 0; hd->nblocks++; inlen -= 128; inbuf += 128; } - _gcry_burn_stack (256); + _gcry_burn_stack (stack_burn_depth); for (; inlen && hd->count < 128; inlen--) hd->buf[hd->count++] = *inbuf++; } @@ -508,6 +557,7 @@ static void sha512_final (void *context) { SHA512_CONTEXT *hd = context; + unsigned int stack_burn_depth; u64 t, msb, lsb; byte *p; @@ -559,8 +609,8 @@ sha512_final (void *context) hd->buf[125] = lsb >> 16; hd->buf[126] = lsb >> 8; hd->buf[127] = lsb; - transform (hd, hd->buf); - _gcry_burn_stack (256); + stack_burn_depth = transform (hd, hd->buf); + _gcry_burn_stack (stack_burn_depth); p = hd->buf; #ifdef WORDS_BIGENDIAN diff --git a/configure.ac b/configure.ac index 89b9366d..959327ad 100644 --- a/configure.ac +++ b/configure.ac @@ -1526,6 +1526,11 @@ LIST_MEMBER(sha512, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512.lo" AC_DEFINE(USE_SHA512, 1, [Defined if this module should be included]) + + if test x"$neonsupport" 
= xyes ; then + # Build with the NEON implementation + GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-armv7-neon.lo" + fi fi LIST_MEMBER(tiger, $enabled_digests) |