summary | refs | log | tree | commit | diff
path: root/cipher/camellia-glue.c
diff options
context:
space:
mode:
author: Jussi Kivilinna <jussi.kivilinna@iki.fi> 2015-08-10 22:09:56 +0300
committer: Jussi Kivilinna <jussi.kivilinna@iki.fi> 2015-08-10 22:09:56 +0300
commit: 49f52c67fb42c0656c8f9af655087f444562ca82 (patch)
tree: 2ef935a60649db8d61b3e1f36982788a15a10506 /cipher/camellia-glue.c
parent: ce746936b6c210e602d106cfbf45cf60b408d871 (diff)
download: libgcrypt-49f52c67fb42c0656c8f9af655087f444562ca82.tar.gz
Optimize OCB offset calculation
* cipher/cipher-internal.h (ocb_get_l): New. * cipher/cipher-ocb.c (_gcry_cipher_ocb_authenticate) (ocb_crypt): Use 'ocb_get_l' instead of '_gcry_cipher_ocb_get_l'. * cipher/camellia-glue.c (get_l): Remove. (_gcry_camellia_ocb_crypt, _gcry_camellia_ocb_auth): Precalculate offset array when block count matches parallel operation size; Use 'ocb_get_l' instead of 'get_l'. * cipher/rijndael-aesni.c (get_l): Add fast path for 75% most common offsets. (aesni_ocb_enc, aesni_ocb_dec, _gcry_aes_aesni_ocb_auth): Precalculate offset array when block count matches parallel operation size. * cipher/rijndael-ssse3-amd64.c (get_l): Add fast path for 75% most common offsets. * cipher/rijndael.c (_gcry_aes_ocb_crypt, _gcry_aes_ocb_auth): Use 'ocb_get_l' instead of '_gcry_cipher_ocb_get_l'. * cipher/serpent.c (get_l): Remove. (_gcry_serpent_ocb_crypt, _gcry_serpent_ocb_auth): Precalculate offset array when block count matches parallel operation size; Use 'ocb_get_l' instead of 'get_l'. * cipher/twofish.c (get_l): Remove. (_gcry_twofish_ocb_crypt, _gcry_twofish_ocb_auth): Use 'ocb_get_l' instead of 'get_l'. -- Patch optimizes OCB offset calculation for generic code and assembly implementations with parallel block processing. Benchmark of OCB AES-NI on Intel Haswell: $ tests/bench-slope --cpu-mhz 3201 cipher aes Before: AES | nanosecs/byte mebibytes/sec cycles/byte CTR enc | 0.274 ns/B 3483.9 MiB/s 0.876 c/B CTR dec | 0.273 ns/B 3490.0 MiB/s 0.875 c/B OCB enc | 0.289 ns/B 3296.1 MiB/s 0.926 c/B OCB dec | 0.299 ns/B 3189.9 MiB/s 0.957 c/B OCB auth | 0.260 ns/B 3670.0 MiB/s 0.832 c/B After: AES | nanosecs/byte mebibytes/sec cycles/byte CTR enc | 0.273 ns/B 3489.4 MiB/s 0.875 c/B CTR dec | 0.273 ns/B 3487.5 MiB/s 0.875 c/B OCB enc | 0.248 ns/B 3852.8 MiB/s 0.792 c/B OCB dec | 0.261 ns/B 3659.5 MiB/s 0.834 c/B OCB auth | 0.227 ns/B 4205.5 MiB/s 0.726 c/B Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
Diffstat (limited to 'cipher/camellia-glue.c')
-rw-r--r-- cipher/camellia-glue.c | 161
1 file changed, 124 insertions(+), 37 deletions(-)
diff --git a/cipher/camellia-glue.c b/cipher/camellia-glue.c
index 99516fc6..2d5dd209 100644
--- a/cipher/camellia-glue.c
+++ b/cipher/camellia-glue.c
@@ -604,19 +604,6 @@ _gcry_camellia_cfb_dec(void *context, unsigned char *iv,
_gcry_burn_stack(burn_stack_depth);
}
-#if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2)
-static inline const unsigned char *
-get_l (gcry_cipher_hd_t c, unsigned char *l_tmp, u64 i)
-{
- unsigned int ntz = _gcry_ctz64 (i);
-
- if (ntz < OCB_L_TABLE_SIZE)
- return c->u_mode.ocb.L[ntz];
- else
- return _gcry_cipher_ocb_get_l (c, l_tmp, i);
-}
-#endif
-
/* Bulk encryption/decryption of complete blocks in OCB mode. */
size_t
_gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
@@ -646,17 +633,43 @@ _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *Ls[32];
int i;
+ if (blkn % 32 == 0)
+ {
+ for (i = 0; i < 32; i += 8)
+ {
+ Ls[i + 0] = c->u_mode.ocb.L[0];
+ Ls[i + 1] = c->u_mode.ocb.L[1];
+ Ls[i + 2] = c->u_mode.ocb.L[0];
+ Ls[i + 3] = c->u_mode.ocb.L[2];
+ Ls[i + 4] = c->u_mode.ocb.L[0];
+ Ls[i + 5] = c->u_mode.ocb.L[1];
+ Ls[i + 6] = c->u_mode.ocb.L[0];
+ }
+
+ Ls[7] = c->u_mode.ocb.L[3];
+ Ls[15] = c->u_mode.ocb.L[4];
+ Ls[23] = c->u_mode.ocb.L[3];
+ }
+
/* Process data in 32 block chunks. */
while (nblocks >= 32)
{
/* l_tmp will be used only every 65536-th block. */
- for (i = 0; i < 32; i += 4)
+ if (blkn % 32 == 0)
+ {
+ blkn += 32;
+ Ls[31] = ocb_get_l(c, l_tmp, blkn);
+ }
+ else
{
- Ls[i + 0] = get_l(c, l_tmp, blkn + 1);
- Ls[i + 1] = get_l(c, l_tmp, blkn + 2);
- Ls[i + 2] = get_l(c, l_tmp, blkn + 3);
- Ls[i + 3] = get_l(c, l_tmp, blkn + 4);
- blkn += 4;
+ for (i = 0; i < 32; i += 4)
+ {
+ Ls[i + 0] = ocb_get_l(c, l_tmp, blkn + 1);
+ Ls[i + 1] = ocb_get_l(c, l_tmp, blkn + 2);
+ Ls[i + 2] = ocb_get_l(c, l_tmp, blkn + 3);
+ Ls[i + 3] = ocb_get_l(c, l_tmp, blkn + 4);
+ blkn += 4;
+ }
}
if (encrypt)
@@ -692,17 +705,41 @@ _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *Ls[16];
int i;
+ if (blkn % 16 == 0)
+ {
+ for (i = 0; i < 16; i += 8)
+ {
+ Ls[i + 0] = c->u_mode.ocb.L[0];
+ Ls[i + 1] = c->u_mode.ocb.L[1];
+ Ls[i + 2] = c->u_mode.ocb.L[0];
+ Ls[i + 3] = c->u_mode.ocb.L[2];
+ Ls[i + 4] = c->u_mode.ocb.L[0];
+ Ls[i + 5] = c->u_mode.ocb.L[1];
+ Ls[i + 6] = c->u_mode.ocb.L[0];
+ }
+
+ Ls[7] = c->u_mode.ocb.L[3];
+ }
+
/* Process data in 16 block chunks. */
while (nblocks >= 16)
{
/* l_tmp will be used only every 65536-th block. */
- for (i = 0; i < 16; i += 4)
+ if (blkn % 16 == 0)
{
- Ls[i + 0] = get_l(c, l_tmp, blkn + 1);
- Ls[i + 1] = get_l(c, l_tmp, blkn + 2);
- Ls[i + 2] = get_l(c, l_tmp, blkn + 3);
- Ls[i + 3] = get_l(c, l_tmp, blkn + 4);
- blkn += 4;
+ blkn += 16;
+ Ls[15] = ocb_get_l(c, l_tmp, blkn);
+ }
+ else
+ {
+ for (i = 0; i < 16; i += 4)
+ {
+ Ls[i + 0] = ocb_get_l(c, l_tmp, blkn + 1);
+ Ls[i + 1] = ocb_get_l(c, l_tmp, blkn + 2);
+ Ls[i + 2] = ocb_get_l(c, l_tmp, blkn + 3);
+ Ls[i + 3] = ocb_get_l(c, l_tmp, blkn + 4);
+ blkn += 4;
+ }
}
if (encrypt)
@@ -768,17 +805,43 @@ _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
const void *Ls[32];
int i;
+ if (blkn % 32 == 0)
+ {
+ for (i = 0; i < 32; i += 8)
+ {
+ Ls[i + 0] = c->u_mode.ocb.L[0];
+ Ls[i + 1] = c->u_mode.ocb.L[1];
+ Ls[i + 2] = c->u_mode.ocb.L[0];
+ Ls[i + 3] = c->u_mode.ocb.L[2];
+ Ls[i + 4] = c->u_mode.ocb.L[0];
+ Ls[i + 5] = c->u_mode.ocb.L[1];
+ Ls[i + 6] = c->u_mode.ocb.L[0];
+ }
+
+ Ls[7] = c->u_mode.ocb.L[3];
+ Ls[15] = c->u_mode.ocb.L[4];
+ Ls[23] = c->u_mode.ocb.L[3];
+ }
+
/* Process data in 32 block chunks. */
while (nblocks >= 32)
{
/* l_tmp will be used only every 65536-th block. */
- for (i = 0; i < 32; i += 4)
+ if (blkn % 32 == 0)
{
- Ls[i + 0] = get_l(c, l_tmp, blkn + 1);
- Ls[i + 1] = get_l(c, l_tmp, blkn + 2);
- Ls[i + 2] = get_l(c, l_tmp, blkn + 3);
- Ls[i + 3] = get_l(c, l_tmp, blkn + 4);
- blkn += 4;
+ blkn += 32;
+ Ls[31] = ocb_get_l(c, l_tmp, blkn);
+ }
+ else
+ {
+ for (i = 0; i < 32; i += 4)
+ {
+ Ls[i + 0] = ocb_get_l(c, l_tmp, blkn + 1);
+ Ls[i + 1] = ocb_get_l(c, l_tmp, blkn + 2);
+ Ls[i + 2] = ocb_get_l(c, l_tmp, blkn + 3);
+ Ls[i + 3] = ocb_get_l(c, l_tmp, blkn + 4);
+ blkn += 4;
+ }
}
_gcry_camellia_aesni_avx2_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,
@@ -809,17 +872,41 @@ _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
const void *Ls[16];
int i;
+ if (blkn % 16 == 0)
+ {
+ for (i = 0; i < 16; i += 8)
+ {
+ Ls[i + 0] = c->u_mode.ocb.L[0];
+ Ls[i + 1] = c->u_mode.ocb.L[1];
+ Ls[i + 2] = c->u_mode.ocb.L[0];
+ Ls[i + 3] = c->u_mode.ocb.L[2];
+ Ls[i + 4] = c->u_mode.ocb.L[0];
+ Ls[i + 5] = c->u_mode.ocb.L[1];
+ Ls[i + 6] = c->u_mode.ocb.L[0];
+ }
+
+ Ls[7] = c->u_mode.ocb.L[3];
+ }
+
/* Process data in 16 block chunks. */
while (nblocks >= 16)
{
/* l_tmp will be used only every 65536-th block. */
- for (i = 0; i < 16; i += 4)
+ if (blkn % 16 == 0)
+ {
+ blkn += 16;
+ Ls[15] = ocb_get_l(c, l_tmp, blkn);
+ }
+ else
{
- Ls[i + 0] = get_l(c, l_tmp, blkn + 1);
- Ls[i + 1] = get_l(c, l_tmp, blkn + 2);
- Ls[i + 2] = get_l(c, l_tmp, blkn + 3);
- Ls[i + 3] = get_l(c, l_tmp, blkn + 4);
- blkn += 4;
+ for (i = 0; i < 16; i += 4)
+ {
+ Ls[i + 0] = ocb_get_l(c, l_tmp, blkn + 1);
+ Ls[i + 1] = ocb_get_l(c, l_tmp, blkn + 2);
+ Ls[i + 2] = ocb_get_l(c, l_tmp, blkn + 3);
+ Ls[i + 3] = ocb_get_l(c, l_tmp, blkn + 4);
+ blkn += 4;
+ }
}
_gcry_camellia_aesni_avx_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,