author    Jussi Kivilinna <jussi.kivilinna@iki.fi>  2015-03-21 13:01:38 +0200
committer Jussi Kivilinna <jussi.kivilinna@iki.fi>  2015-03-21 13:01:38 +0200
commit    a06fbc0d1e98eb1218eff55ad2f37d471e4f33b2 (patch)
tree      98fc36154e9a5916fb4ba4bd867083f4a9478292 /src/g10lib.h
parent    92fa5f16d69707e302c0f85b2e5e80af8dc037f1 (diff)
download  libgcrypt-a06fbc0d1e98eb1218eff55ad2f37d471e4f33b2.tar.gz
wipememory: use one-byte aligned type for unaligned memory accesses
* src/g10lib.h (fast_wipememory2_unaligned_head): Enable unaligned
access only when HAVE_GCC_ATTRIBUTE_PACKED and
HAVE_GCC_ATTRIBUTE_ALIGNED defined.
(fast_wipememory_t): New.
(fast_wipememory2): Use 'fast_wipememory_t'.
--
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
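The core of the change: a plain word store through a misaligned pointer, as in
the old `*_vptr_long = _vset_long' below, is undefined behavior in C even on
architectures whose hardware tolerates it. Wrapping FASTWIPE_T in a struct
declared __attribute__((packed, aligned(1))) tells GCC the object may sit at
any address, so stores through it are compiled in an unaligned-safe way. A
minimal sketch of the idiom, using illustrative names rather than the
libgcrypt definitions:

    #include <stdint.h>

    /* Hypothetical wrapper type; libgcrypt wraps FASTWIPE_T instead. */
    typedef struct unaligned_u64_s
    {
      uint64_t v;
    } __attribute__((packed, aligned(1))) unaligned_u64_t;

    /* Store VAL at P, which may have any alignment.  Without the packed
       one-byte-aligned wrapper, this cast-and-store would be undefined
       behavior for misaligned P. */
    static void
    store_u64_unaligned (void *p, uint64_t val)
    {
      ((volatile unaligned_u64_t *)p)->v = val;
    }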
Diffstat (limited to 'src/g10lib.h')
-rw-r--r--  src/g10lib.h | 23 +++++++++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/src/g10lib.h b/src/g10lib.h
index 238871d0..50a08ec8 100644
--- a/src/g10lib.h
+++ b/src/g10lib.h
@@ -323,16 +323,26 @@ void __gcry_burn_stack (unsigned int bytes);
#endif
/* Following architectures can handle unaligned accesses fast. */
-#if defined(__i386__) || defined(__x86_64__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- (defined(__arm__) && defined(__ARM_FEATURE_UNALIGNED)) || \
- defined(__aarch64__)
+#if defined(HAVE_GCC_ATTRIBUTE_PACKED) && \
+ defined(HAVE_GCC_ATTRIBUTE_ALIGNED) && \
+ (defined(__i386__) || defined(__x86_64__) || \
+ defined(__powerpc__) || defined(__powerpc64__) || \
+ (defined(__arm__) && defined(__ARM_FEATURE_UNALIGNED)) || \
+ defined(__aarch64__))
#define fast_wipememory2_unaligned_head(_ptr,_set,_len) /*do nothing*/
+typedef struct fast_wipememory_s
+{
+  FASTWIPE_T a;
+} __attribute__((packed, aligned(1))) fast_wipememory_t;
#else
#define fast_wipememory2_unaligned_head(_vptr,_vset,_vlen) do { \
while((size_t)(_vptr)&(sizeof(FASTWIPE_T)-1) && _vlen) \
{ *_vptr=(_vset); _vptr++; _vlen--; } \
} while(0)
+typedef struct fast_wipememory_s
+{
+  FASTWIPE_T a;
+} fast_wipememory_t;
#endif
/* fast_wipememory2 may leave tail bytes unhandled, in which case tail bytes
@@ -344,8 +354,9 @@ void __gcry_burn_stack (unsigned int bytes);
break; \
_vset_long *= FASTWIPE_MULT; \
do { \
- volatile FASTWIPE_T *_vptr_long = (volatile void *)_vptr; \
- *_vptr_long = _vset_long; \
+ volatile fast_wipememory_t *_vptr_long = \
+ (volatile void *)_vptr; \
+ _vptr_long->a = _vset_long; \
_vlen -= sizeof(FASTWIPE_T); \
_vptr += sizeof(FASTWIPE_T); \
} while (_vlen >= sizeof(FASTWIPE_T)); \
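Taken together, the two hunks amount to the following routine. This is a
self-contained sketch under stated assumptions, not libgcrypt's code:
FASTWIPE_T is fixed at 64 bits, `sketch_wipememory' is a hypothetical name,
and the real fast_wipememory2 runs the byte-at-a-time head loop only on
architectures without fast unaligned access and leaves tail bytes to its
caller.

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t FASTWIPE_T;
    #define FASTWIPE_MULT 0x0101010101010101ULL

    typedef struct fast_wipememory_s
    {
      FASTWIPE_T a;
    } __attribute__((packed, aligned(1))) fast_wipememory_t;

    static void
    sketch_wipememory (void *ptr, int set, size_t len)
    {
      volatile unsigned char *p = ptr;
      FASTWIPE_T set_long = (unsigned char)set;

      set_long *= FASTWIPE_MULT;  /* spread the byte into every lane */

      /* Word-at-a-time stores; the packed type makes unaligned P safe. */
      while (len >= sizeof(FASTWIPE_T))
        {
          ((volatile fast_wipememory_t *)p)->a = set_long;
          p += sizeof(FASTWIPE_T);
          len -= sizeof(FASTWIPE_T);
        }

      /* Remaining tail bytes, one at a time. */
      while (len--)
        *p++ = set;
    }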