Diffstat (limited to 'cipher/rijndael.c')
-rw-r--r--  cipher/rijndael.c  38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/cipher/rijndael.c b/cipher/rijndael.c
index 85c1a41d..e9bb4f68 100644
--- a/cipher/rijndael.c
+++ b/cipher/rijndael.c
@@ -67,11 +67,11 @@
# define USE_AMD64_ASM 1
#endif
-/* USE_ARMV6_ASM indicates whether to use ARMv6 assembly code. */
-#undef USE_ARMV6_ASM
-#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__)
+/* USE_ARM_ASM indicates whether to use ARM assembly code. */
+#undef USE_ARM_ASM
+#if defined(__ARMEL__)
# ifdef HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS
-# define USE_ARMV6_ASM 1
+# define USE_ARM_ASM 1
# endif
#endif
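
The rename is not only cosmetic: the old gate additionally required HAVE_ARM_ARCH_V6, while the new one accepts any little-endian ARM target whose platform assembler is GCC-compatible. A condensed before/after sketch of the check (nesting flattened, otherwise as in the hunk above):

    /* Before: ARMv6 only. */
    #if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
        defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS)
    # define USE_ARMV6_ASM 1
    #endif

    /* After: any little-endian ARM with a compatible assembler. */
    #if defined(__ARMEL__) && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS)
    # define USE_ARM_ASM 1
    #endif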
@@ -123,18 +123,18 @@ extern void _gcry_aes_amd64_decrypt_block(const void *keysched_dec,
                                          int rounds);
#endif /*USE_AMD64_ASM*/
-#ifdef USE_ARMV6_ASM
-/* ARMv6 assembly implementations of AES */
-extern void _gcry_aes_armv6_encrypt_block(const void *keysched_enc,
+#ifdef USE_ARM_ASM
+/* ARM assembly implementations of AES */
+extern void _gcry_aes_arm_encrypt_block(const void *keysched_enc,
                                        unsigned char *out,
                                        const unsigned char *in,
                                        int rounds);
-extern void _gcry_aes_armv6_decrypt_block(const void *keysched_dec,
+extern void _gcry_aes_arm_decrypt_block(const void *keysched_dec,
                                        unsigned char *out,
                                        const unsigned char *in,
                                        int rounds);
-#endif /*USE_ARMV6_ASM*/
+#endif /*USE_ARM_ASM*/
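
The renamed entry points keep the old calling contract: expanded key schedule in, one 16-byte block in and out, plus the round count. A hypothetical call-site sketch (ks and the buffers are illustrative, not libgcrypt structures; 10/12/14 rounds correspond to AES-128/192/256):

    unsigned char in[16] = { 0 };   /* one plaintext block */
    unsigned char out[16];          /* receives the ciphertext block */
    const void *ks = 0;             /* would be ctx->keyschenc in practice */
    _gcry_aes_arm_encrypt_block (ks, out, in, 10 /* AES-128 */);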
@@ -567,8 +567,8 @@ do_encrypt_aligned (const RIJNDAEL_context *ctx,
{
#ifdef USE_AMD64_ASM
_gcry_aes_amd64_encrypt_block(ctx->keyschenc, b, a, ctx->rounds);
-#elif defined(USE_ARMV6_ASM)
- _gcry_aes_armv6_encrypt_block(ctx->keyschenc, b, a, ctx->rounds);
+#elif defined(USE_ARM_ASM)
+ _gcry_aes_arm_encrypt_block(ctx->keyschenc, b, a, ctx->rounds);
#else
#define rk (ctx->keyschenc)
int rounds = ctx->rounds;
@@ -651,7 +651,7 @@ do_encrypt_aligned (const RIJNDAEL_context *ctx,
*((u32_a_t*)(b+ 8)) ^= *((u32_a_t*)rk[rounds][2]);
*((u32_a_t*)(b+12)) ^= *((u32_a_t*)rk[rounds][3]);
#undef rk
-#endif /*!USE_AMD64_ASM && !USE_ARMV6_ASM*/
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
}
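
Taken together, the two encrypt hunks leave do_encrypt_aligned as a three-way compile-time dispatch; a sketch of the resulting shape, with the generic body elided:

    static void
    do_encrypt_aligned (const RIJNDAEL_context *ctx,
                        unsigned char *b, const unsigned char *a)
    {
    #ifdef USE_AMD64_ASM
      _gcry_aes_amd64_encrypt_block (ctx->keyschenc, b, a, ctx->rounds);
    #elif defined(USE_ARM_ASM)
      _gcry_aes_arm_encrypt_block (ctx->keyschenc, b, a, ctx->rounds);
    #else
      /* ... generic table-driven rounds, ending in the final
         AddRoundKey XORs shown above ... */
    #endif
    }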
@@ -659,7 +659,7 @@ static void
do_encrypt (const RIJNDAEL_context *ctx,
unsigned char *bx, const unsigned char *ax)
{
-#if !defined(USE_AMD64_ASM) && !defined(USE_ARMV6_ASM)
+#if !defined(USE_AMD64_ASM) && !defined(USE_ARM_ASM)
/* BX and AX are not necessarily correctly aligned. Thus we might
   need to copy them here. We try to align to 16 bytes. */
if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
@@ -680,7 +680,7 @@ do_encrypt (const RIJNDAEL_context *ctx,
memcpy (bx, b.b, 16);
}
else
-#endif /*!USE_AMD64_ASM && !USE_ARMV6_ASM*/
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
{
do_encrypt_aligned (ctx, bx, ax);
}
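
The surrounding #if means the copy-through-buffer fallback exists only for the generic C path; when USE_AMD64_ASM or USE_ARM_ASM is defined, the check is compiled out and the assembly routines receive the caller's buffers as-is. A self-contained sketch of the fallback, assuming a 16-byte AES block; the function name is hypothetical and C11 alignas stands in for libgcrypt's own alignment macros:

    #include <stdalign.h>
    #include <stddef.h>
    #include <string.h>

    static void
    encrypt_any_alignment (const RIJNDAEL_context *ctx,
                           unsigned char *bx, const unsigned char *ax)
    {
      if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
        {
          /* Stage the block in 16-byte-aligned stack buffers so the
             generic implementation can access it word-wise. */
          alignas (16) unsigned char a[16], b[16];

          memcpy (a, ax, 16);
          do_encrypt_aligned (ctx, b, a);
          memcpy (bx, b, 16);
        }
      else
        do_encrypt_aligned (ctx, bx, ax);  /* already suitably aligned */
    }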
@@ -1694,8 +1694,8 @@ do_decrypt_aligned (RIJNDAEL_context *ctx,
{
#ifdef USE_AMD64_ASM
_gcry_aes_amd64_decrypt_block(ctx->keyschdec, b, a, ctx->rounds);
-#elif defined(USE_ARMV6_ASM)
- _gcry_aes_armv6_decrypt_block(ctx->keyschdec, b, a, ctx->rounds);
+#elif defined(USE_ARM_ASM)
+ _gcry_aes_arm_decrypt_block(ctx->keyschdec, b, a, ctx->rounds);
#else
#define rk (ctx->keyschdec)
int rounds = ctx->rounds;
@@ -1779,7 +1779,7 @@ do_decrypt_aligned (RIJNDAEL_context *ctx,
*((u32_a_t*)(b+ 8)) ^= *((u32_a_t*)rk[0][2]);
*((u32_a_t*)(b+12)) ^= *((u32_a_t*)rk[0][3]);
#undef rk
-#endif /*!USE_AMD64_ASM && !USE_ARMV6_ASM*/
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
}
@@ -1794,7 +1794,7 @@ do_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
ctx->decryption_prepared = 1;
}
-#if !defined(USE_AMD64_ASM) && !defined(USE_ARMV6_ASM)
+#if !defined(USE_AMD64_ASM) && !defined(USE_ARM_ASM)
/* BX and AX are not necessarily correctly aligned. Thus we might
   need to copy them here. We try to align to 16 bytes. */
if (((size_t)ax & 0x0f) || ((size_t)bx & 0x0f))
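
The ctx->decryption_prepared = 1; context line above belongs to the lazy setup of the inverse key schedule: it is derived only on the first call to do_decrypt. A sketch of that step, with prepare_decryption standing in for the elided helper:

    if (!ctx->decryption_prepared)
      {
        /* Derive the decryption round keys from the encryption
           schedule once, on first use. */
        prepare_decryption (ctx);
        ctx->decryption_prepared = 1;
      }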
@@ -1815,7 +1815,7 @@ do_decrypt (RIJNDAEL_context *ctx, byte *bx, const byte *ax)
memcpy (bx, b.b, 16);
}
else
-#endif /*!USE_AMD64_ASM && !USE_ARMV6_ASM*/
+#endif /*!USE_AMD64_ASM && !USE_ARM_ASM*/
{
do_decrypt_aligned (ctx, bx, ax);
}