diff options
author | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2015-10-24 12:41:23 +0300 |
---|---|---|
committer | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2015-10-28 20:08:45 +0200 |
commit | 16fd540f4d01eb6dc23d9509ae549353617c7a67 (patch) | |
tree | 042cee8e0c1c3d3dfbd5dedb44fa3ba0e0fb0848 /cipher/serpent.c | |
parent | ae40af427fd2a856b24ec2a41323ec8b80ffc9c0 (diff) | |
download | libgcrypt-16fd540f4d01eb6dc23d9509ae549353617c7a67.tar.gz |
Fix OCB amd64 assembly implementations for x32
* cipher/camellia-glue.c (_gcry_camellia_aesni_avx_ocb_enc)
(_gcry_camellia_aesni_avx_ocb_dec, _gcry_camellia_aesni_avx_ocb_auth)
(_gcry_camellia_aesni_avx2_ocb_enc, _gcry_camellia_aesni_avx2_ocb_dec)
(_gcry_camellia_aesni_avx2_ocb_auth, _gcry_camellia_ocb_crypt)
(_gcry_camellia_ocb_auth): Change 'Ls' from pointer array to u64 array.
* cipher/serpent.c (_gcry_serpent_sse2_ocb_enc)
(_gcry_serpent_sse2_ocb_dec, _gcry_serpent_sse2_ocb_auth)
(_gcry_serpent_avx2_ocb_enc, _gcry_serpent_avx2_ocb_dec)
(_gcry_serpent_ocb_crypt, _gcry_serpent_ocb_auth): Ditto.
* cipher/twofish.c (_gcry_twofish_amd64_ocb_enc)
(_gcry_twofish_amd64_ocb_dec, _gcry_twofish_amd64_ocb_auth)
(twofish_amd64_ocb_enc, twofish_amd64_ocb_dec, twofish_amd64_ocb_auth)
(_gcry_twofish_ocb_crypt, _gcry_twofish_ocb_auth): Ditto.
--
Pointers on x32 are 32-bit, but the amd64 assembly implementations
expect 64-bit pointers. Change the 'Ls' array to 64-bit integers so
that the input array has the correct format for the assembly functions.
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
Diffstat (limited to 'cipher/serpent.c')
-rw-r--r-- | cipher/serpent.c | 104 |
1 file changed, 56 insertions, 48 deletions
diff --git a/cipher/serpent.c b/cipher/serpent.c index fc3afa6b..4ef7f529 100644 --- a/cipher/serpent.c +++ b/cipher/serpent.c @@ -125,20 +125,20 @@ extern void _gcry_serpent_sse2_ocb_enc(serpent_context_t *ctx, const unsigned char *in, unsigned char *offset, unsigned char *checksum, - const void *Ls[8]) ASM_FUNC_ABI; + const u64 Ls[8]) ASM_FUNC_ABI; extern void _gcry_serpent_sse2_ocb_dec(serpent_context_t *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, - const void *Ls[8]) ASM_FUNC_ABI; + const u64 Ls[8]) ASM_FUNC_ABI; extern void _gcry_serpent_sse2_ocb_auth(serpent_context_t *ctx, const unsigned char *abuf, unsigned char *offset, unsigned char *checksum, - const void *Ls[8]) ASM_FUNC_ABI; + const u64 Ls[8]) ASM_FUNC_ABI; #endif #ifdef USE_AVX2 @@ -165,20 +165,20 @@ extern void _gcry_serpent_avx2_ocb_enc(serpent_context_t *ctx, const unsigned char *in, unsigned char *offset, unsigned char *checksum, - const void *Ls[16]) ASM_FUNC_ABI; + const u64 Ls[16]) ASM_FUNC_ABI; extern void _gcry_serpent_avx2_ocb_dec(serpent_context_t *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, - const void *Ls[16]) ASM_FUNC_ABI; + const u64 Ls[16]) ASM_FUNC_ABI; extern void _gcry_serpent_avx2_ocb_auth(serpent_context_t *ctx, const unsigned char *abuf, unsigned char *offset, unsigned char *checksum, - const void *Ls[16]) ASM_FUNC_ABI; + const u64 Ls[16]) ASM_FUNC_ABI; #endif #ifdef USE_NEON @@ -1249,25 +1249,27 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, if (ctx->use_avx2) { int did_use_avx2 = 0; - const void *Ls[16]; + u64 Ls[16]; unsigned int n = 16 - (blkn % 16); - const void **l; + u64 *l; int i; if (nblocks >= 16) { for (i = 0; i < 16; i += 8) { - Ls[(i + 0 + n) % 16] = c->u_mode.ocb.L[0]; - Ls[(i + 1 + n) % 16] = c->u_mode.ocb.L[1]; - Ls[(i + 2 + n) % 16] = c->u_mode.ocb.L[0]; - Ls[(i + 3 + n) % 16] = c->u_mode.ocb.L[2]; - Ls[(i + 4 + n) % 16] = 
c->u_mode.ocb.L[0]; - Ls[(i + 5 + n) % 16] = c->u_mode.ocb.L[1]; - Ls[(i + 6 + n) % 16] = c->u_mode.ocb.L[0]; + /* Use u64 to store pointers for x32 support (assembly function + * assumes 64-bit pointers). */ + Ls[(i + 0 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(i + 1 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(i + 2 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(i + 3 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; + Ls[(i + 4 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(i + 5 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(i + 6 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; } - Ls[(7 + n) % 16] = c->u_mode.ocb.L[3]; + Ls[(7 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; l = &Ls[(15 + n) % 16]; /* Process data in 16 block chunks. */ @@ -1275,7 +1277,7 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, { /* l_tmp will be used only every 65536-th block. */ blkn += 16; - *l = ocb_get_l(c, l_tmp, blkn - blkn % 16); + *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 16); if (encrypt) _gcry_serpent_avx2_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv, @@ -1305,19 +1307,21 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, #ifdef USE_SSE2 { int did_use_sse2 = 0; - const void *Ls[8]; + u64 Ls[8]; unsigned int n = 8 - (blkn % 8); - const void **l; + u64 *l; if (nblocks >= 8) { - Ls[(0 + n) % 8] = c->u_mode.ocb.L[0]; - Ls[(1 + n) % 8] = c->u_mode.ocb.L[1]; - Ls[(2 + n) % 8] = c->u_mode.ocb.L[0]; - Ls[(3 + n) % 8] = c->u_mode.ocb.L[2]; - Ls[(4 + n) % 8] = c->u_mode.ocb.L[0]; - Ls[(5 + n) % 8] = c->u_mode.ocb.L[1]; - Ls[(6 + n) % 8] = c->u_mode.ocb.L[0]; + /* Use u64 to store pointers for x32 support (assembly function + * assumes 64-bit pointers). 
*/ + Ls[(0 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(1 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(2 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(3 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; + Ls[(4 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(5 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(6 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; l = &Ls[(7 + n) % 8]; /* Process data in 8 block chunks. */ @@ -1325,7 +1329,7 @@ _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, { /* l_tmp will be used only every 65536-th block. */ blkn += 8; - *l = ocb_get_l(c, l_tmp, blkn - blkn % 8); + *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 8); if (encrypt) _gcry_serpent_sse2_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv, @@ -1435,25 +1439,27 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, if (ctx->use_avx2) { int did_use_avx2 = 0; - const void *Ls[16]; + u64 Ls[16]; unsigned int n = 16 - (blkn % 16); - const void **l; + u64 *l; int i; if (nblocks >= 16) { for (i = 0; i < 16; i += 8) { - Ls[(i + 0 + n) % 16] = c->u_mode.ocb.L[0]; - Ls[(i + 1 + n) % 16] = c->u_mode.ocb.L[1]; - Ls[(i + 2 + n) % 16] = c->u_mode.ocb.L[0]; - Ls[(i + 3 + n) % 16] = c->u_mode.ocb.L[2]; - Ls[(i + 4 + n) % 16] = c->u_mode.ocb.L[0]; - Ls[(i + 5 + n) % 16] = c->u_mode.ocb.L[1]; - Ls[(i + 6 + n) % 16] = c->u_mode.ocb.L[0]; + /* Use u64 to store pointers for x32 support (assembly function + * assumes 64-bit pointers). 
*/ + Ls[(i + 0 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(i + 1 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(i + 2 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(i + 3 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; + Ls[(i + 4 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(i + 5 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(i + 6 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; } - Ls[(7 + n) % 16] = c->u_mode.ocb.L[3]; + Ls[(7 + n) % 16] = (uintptr_t)(void *)c->u_mode.ocb.L[3]; l = &Ls[(15 + n) % 16]; /* Process data in 16 block chunks. */ @@ -1461,7 +1467,7 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, { /* l_tmp will be used only every 65536-th block. */ blkn += 16; - *l = ocb_get_l(c, l_tmp, blkn - blkn % 16); + *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 16); _gcry_serpent_avx2_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset, c->u_mode.ocb.aad_sum, Ls); @@ -1486,19 +1492,21 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, #ifdef USE_SSE2 { int did_use_sse2 = 0; - const void *Ls[8]; + u64 Ls[8]; unsigned int n = 8 - (blkn % 8); - const void **l; + u64 *l; if (nblocks >= 8) { - Ls[(0 + n) % 8] = c->u_mode.ocb.L[0]; - Ls[(1 + n) % 8] = c->u_mode.ocb.L[1]; - Ls[(2 + n) % 8] = c->u_mode.ocb.L[0]; - Ls[(3 + n) % 8] = c->u_mode.ocb.L[2]; - Ls[(4 + n) % 8] = c->u_mode.ocb.L[0]; - Ls[(5 + n) % 8] = c->u_mode.ocb.L[1]; - Ls[(6 + n) % 8] = c->u_mode.ocb.L[0]; + /* Use u64 to store pointers for x32 support (assembly function + * assumes 64-bit pointers). 
*/ + Ls[(0 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(1 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(2 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(3 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[2]; + Ls[(4 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; + Ls[(5 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[1]; + Ls[(6 + n) % 8] = (uintptr_t)(void *)c->u_mode.ocb.L[0]; l = &Ls[(7 + n) % 8]; /* Process data in 8 block chunks. */ @@ -1506,7 +1514,7 @@ _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, { /* l_tmp will be used only every 65536-th block. */ blkn += 8; - *l = ocb_get_l(c, l_tmp, blkn - blkn % 8); + *l = (uintptr_t)(void *)ocb_get_l(c, l_tmp, blkn - blkn % 8); _gcry_serpent_sse2_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset, c->u_mode.ocb.aad_sum, Ls); |