author     Jussi Kivilinna <jussi.kivilinna@iki.fi>   2015-05-03 17:16:08 +0300
committer  Jussi Kivilinna <jussi.kivilinna@iki.fi>   2015-05-03 17:31:58 +0300
commit     24a769a7c7601dbb85332e550f6fbd121b56df5f (patch)
tree       307b2157e49b3742630103ebfbc1fea878e5d96e
parent     2f4fefdbc62857b6e2da26ce111ee140a068c471 (diff)
download   libgcrypt-24a769a7c7601dbb85332e550f6fbd121b56df5f.tar.gz
Fix WIN64 assembly glue for AES
* cipher/rijndael.c (do_encrypt, do_decrypt)
[!HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS]: Change input operands to
input+output to mark the volatile nature of the used registers.
--

Function arguments cannot be passed to the assembly block as plain
input operands, because the target function modifies those input
registers.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
-rw-r--r--  cipher/rijndael.c  44
1 file changed, 24 insertions, 20 deletions
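[Editorial note] The change in the diff below is purely about GCC inline-asm
constraints: the pre-patch asm statements passed ctx->keyschenc, bx, ax and
ctx->rounds as plain input operands ("D", "S", "d", "c"), yet the called AES
assembly routine clobbers exactly those argument registers, so GCC was free to
assume they still held their original values after the asm. The patch copies
the arguments into locals and binds them as read-write ("+") operands instead.
The following self-contained sketch shows the same pattern in isolation. It is
not libgcrypt code: stand_in_block_fn and call_via_asm are hypothetical names,
the callee merely stands in for _gcry_aes_amd64_encrypt_block, and the sketch
assumes GCC on x86-64 SysV with a callee that, like the hand-written assembly,
touches only the listed registers and no vector registers (the usual red-zone
caveat about calling out of inline asm applies to the sketch just as it does
to the original).

/* Editorial sketch -- not part of the commit.  Isolates the constraint
 * pattern the patch introduces: when inline asm performs a call, the
 * argument registers must be read-write ("+") operands, because the
 * callee is free to clobber them. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical callee; returns a fake "stack burn depth" like the real one. */
static uintptr_t
stand_in_block_fn (const void *key, unsigned char *dst,
                   const unsigned char *src, uintptr_t rounds)
{
  (void)key; (void)src;
  dst[0] ^= (unsigned char)rounds;
  return 4 * sizeof (void *);
}

static uintptr_t
call_via_asm (const void *key, unsigned char *dst,
              const unsigned char *src, uintptr_t rounds)
{
  uintptr_t ret;

  /* Pre-patch style listed key/dst/src/rounds as pure inputs
   * ("D", "S", "d", "c"); GCC could then assume %rdi/%rsi/%rdx/%rcx
   * still hold those values after the asm, which is false once the
   * callee has run.  Binding local copies with "+" constraints tells
   * GCC the registers are modified. */
  asm volatile ("callq *%[fn]\n\t"
                : [fn] "=a" (ret),
                  "+D" (key),
                  "+S" (dst),
                  "+d" (src),
                  "+c" (rounds)
                : "0" (stand_in_block_fn)
                : "cc", "memory", "r8", "r9", "r10", "r11");
  return ret;
}

int
main (void)
{
  unsigned char buf[16] = { 0 };
  printf ("burn depth: %lu, buf[0]=%u\n",
          (unsigned long) call_via_asm (buf, buf, buf, 10),
          (unsigned) buf[0]);
  return 0;
}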
diff --git a/cipher/rijndael.c b/cipher/rijndael.c
index 7ebf3293..4f063c46 100644
--- a/cipher/rijndael.c
+++ b/cipher/rijndael.c
@@ -671,17 +671,19 @@ do_encrypt (const RIJNDAEL_context *ctx,
# else
/* Call SystemV ABI function without storing non-volatile XMM registers,
* as target function does not use vector instruction sets. */
+ const void *key = ctx->keyschenc;
+ uintptr_t rounds = ctx->rounds;
uintptr_t ret;
- asm ("movq %[encT], %%r8\n\t"
- "callq *%[ret]\n\t"
- : [ret] "=a" (ret)
- : "0" (_gcry_aes_amd64_encrypt_block),
- "D" (ctx->keyschenc),
- "S" (bx),
- "d" (ax),
- "c" (ctx->rounds),
- [encT] "r" (encT)
- : "cc", "memory", "r8", "r9", "r10", "r11");
+ asm volatile ("movq %[encT], %%r8\n\t"
+ "callq *%[ret]\n\t"
+ : [ret] "=a" (ret),
+ "+D" (key),
+ "+S" (bx),
+ "+d" (ax),
+ "+c" (rounds)
+ : "0" (_gcry_aes_amd64_encrypt_block),
+ [encT] "g" (encT)
+ : "cc", "memory", "r8", "r9", "r10", "r11");
return ret;
# endif /* HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS */
#elif defined(USE_ARM_ASM)
@@ -1031,17 +1033,19 @@ do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
# else
/* Call SystemV ABI function without storing non-volatile XMM registers,
* as target function does not use vector instruction sets. */
+ const void *key = ctx->keyschdec;
+ uintptr_t rounds = ctx->rounds;
uintptr_t ret;
- asm ("movq %[dectabs], %%r8\n\t"
- "callq *%[ret]\n\t"
- : [ret] "=a" (ret)
- : "0" (_gcry_aes_amd64_decrypt_block),
- "D" (ctx->keyschdec),
- "S" (bx),
- "d" (ax),
- "c" (ctx->rounds),
- [dectabs] "r" (&dec_tables)
- : "cc", "memory", "r8", "r9", "r10", "r11");
+ asm volatile ("movq %[dectabs], %%r8\n\t"
+ "callq *%[ret]\n\t"
+ : [ret] "=a" (ret),
+ "+D" (key),
+ "+S" (bx),
+ "+d" (ax),
+ "+c" (rounds)
+ : "0" (_gcry_aes_amd64_decrypt_block),
+ [dectabs] "g" (&dec_tables)
+ : "cc", "memory", "r8", "r9", "r10", "r11");
return ret;
# endif /* HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS */
#elif defined(USE_ARM_ASM)