aboutsummaryrefslogtreecommitdiff
path: root/crypto/modes/modes_lcl.h
diff options
context:
space:
mode:
authorAndy Polyakov <appro@openssl.org>2013-04-13 20:57:37 +0200
committerAndy Polyakov <appro@openssl.org>2013-04-13 20:57:37 +0200
commit3bdd80521a81d50ade4214053cd9b293f920a77b (patch)
tree7815ce3cb0ac35544e598f2cf5e535efe12425f9 /crypto/modes/modes_lcl.h
parent4544f0a69161a37ee3edce3cc1bc34c3678a4d64 (diff)
downloadopenssl-3bdd80521a81d50ade4214053cd9b293f920a77b.zip
openssl-3bdd80521a81d50ade4214053cd9b293f920a77b.tar.gz
openssl-3bdd80521a81d50ade4214053cd9b293f920a77b.tar.bz2
crypto/modes/modes_lcl.h: let STRICT_ALIGNMENT be on ARMv7.
While ARMv7 in general is capable of unaligned access, not all instructions actually are. And the trouble is that the compiler doesn't seem to differentiate between those capable and incapable of unaligned access. The side effect is that the kernel goes into an endless loop retrying the same instruction, triggering an unaligned trap. The problem was observed in the xts128.c and ccm128.c modules. It's possible to resolve it by using (volatile u32*) casts, but letting STRICT_ALIGNMENT be defined feels more appropriate.
Diffstat (limited to 'crypto/modes/modes_lcl.h')
-rw-r--r--crypto/modes/modes_lcl.h5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/crypto/modes/modes_lcl.h b/crypto/modes/modes_lcl.h
index 40a6d5a..9d83e12 100644
--- a/crypto/modes/modes_lcl.h
+++ b/crypto/modes/modes_lcl.h
@@ -29,10 +29,7 @@ typedef unsigned char u8;
#if defined(__i386) || defined(__i386__) || \
defined(__x86_64) || defined(__x86_64__) || \
defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
- defined(__s390__) || defined(__s390x__) || \
- ( (defined(__arm__) || defined(__arm)) && \
- (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)) )
+ defined(__s390__) || defined(__s390x__)
# undef STRICT_ALIGNMENT
#endif