summary refs log tree commit diff
path: root/cipher/cipher-internal.h
diff options
context:
space:
mode:
Diffstat (limited to 'cipher/cipher-internal.h')
-rw-r--r-- cipher/cipher-internal.h | 13
1 files changed, 8 insertions, 5 deletions
diff --git a/cipher/cipher-internal.h b/cipher/cipher-internal.h
index f6bda668..fef0ecba 100644
--- a/cipher/cipher-internal.h
+++ b/cipher/cipher-internal.h
@@ -42,7 +42,7 @@
#define GCM_USE_TABLES 1
-/* GCM_USE_INTEL_PCLMUL inidicates whether to compile GCM with Intel PCLMUL
+/* GCM_USE_INTEL_PCLMUL indicates whether to compile GCM with Intel PCLMUL
code. */
#undef GCM_USE_INTEL_PCLMUL
#if defined(ENABLE_PCLMUL_SUPPORT) && defined(GCM_USE_TABLES)
@@ -54,6 +54,10 @@
#endif /* GCM_USE_INTEL_PCLMUL */
+typedef unsigned int (*ghash_fn_t) (gcry_cipher_hd_t c, byte *result,
+ const byte *buf, size_t nblocks);
+
+
/* A VIA processor with the Padlock engine as well as the Intel AES_NI
instructions require an alignment of most data on a 16 byte
boundary. Because we trick out the compiler while allocating the
@@ -188,6 +192,7 @@ struct gcry_cipher_handle
unsigned char macbuf[GCRY_CCM_BLOCK_LEN];
int mac_unused; /* Number of unprocessed bytes in MACBUF. */
+
/* byte counters for GCM */
u32 aadlen[2];
u32 datalen[2];
@@ -209,10 +214,8 @@ struct gcry_cipher_handle
unsigned char key[MAX_BLOCKSIZE];
} u_ghash_key;
-#ifdef GCM_USE_INTEL_PCLMUL
- /* Use Intel PCLMUL instructions for accelerated GHASH. */
- unsigned int use_intel_pclmul:1;
-#endif
+ /* GHASH implementation in use. */
+ ghash_fn_t ghash_fn;
/* Pre-calculated table for GCM. */
#ifdef GCM_USE_TABLES