author    Andy Polyakov <appro@openssl.org>    2015-01-25 15:48:42 +0100
committer Andy Polyakov <appro@openssl.org>    2015-01-30 16:37:21 +0100
commit    2e635aa81cf1c4e3fd7cb0334c79e7d0771140f1 (patch)
tree      12f8725519fdc4deb90c8207c43e28fb528de5fe /crypto/modes/gcm128.c
parent    b2991c081aba5351a3386bdde2927672d53e5c99 (diff)
modes/gcm128.c: harmonize ctx->ghash assignment, shortcut *_ctr32
in OPENSSL_SMALL_FOOTPRINT build, remove undesired reformat artefact and
inconsistency in pre-processor logic.

Reviewed-by: Rich Salz <rsalz@openssl.org>
Diffstat (limited to 'crypto/modes/gcm128.c')
-rw-r--r--  crypto/modes/gcm128.c  198
1 file changed, 92 insertions, 106 deletions
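
Before the diff itself, a minimal standalone sketch of the two ideas named in
the commit message: the CTX__GHASH helper macro that harmonizes the
ctx->ghash assignment across the platform-specific branches, and the
OPENSSL_SMALL_FOOTPRINT shortcut that lets the *_ctr32 entry points fall
through to the generic routines. All demo_* names below are placeholders
invented for illustration, not OpenSSL identifiers.

/*
 * Illustrative sketch only -- not part of the patch below.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct {
    void (*ghash)(const unsigned char *inp, size_t len);
} demo_ctx;

static void demo_ghash_bulk(const unsigned char *inp, size_t len)
{
    (void)inp;
    printf("bulk ghash over %zu bytes\n", len);
}

/* Pretend this build provides a bulk GHASH routine. */
#define GHASH

/*
 * Idea 1: one macro replaces the repeated
 *   #if defined(GHASH) ctx->ghash = f; #else ctx->ghash = NULL; #endif
 * blocks, so every platform branch assigns ctx->ghash the same way.
 */
#if defined(GHASH)
# define CTX__GHASH(f) (ctx->ghash = (f))
#else
# define CTX__GHASH(f) (ctx->ghash = NULL)
#endif

static void demo_init(demo_ctx *ctx)
{
    CTX__GHASH(demo_ghash_bulk);
}
#undef CTX__GHASH

static int demo_encrypt(demo_ctx *ctx, const unsigned char *in, size_t len)
{
    if (ctx->ghash != NULL)
        ctx->ghash(in, len);
    return 0;
}

/*
 * Idea 2: in an OPENSSL_SMALL_FOOTPRINT build the stream (ctr32) variant
 * simply forwards to the generic routine instead of carrying its own body.
 */
static int demo_encrypt_ctr32(demo_ctx *ctx, const unsigned char *in,
                              size_t len)
{
#if defined(OPENSSL_SMALL_FOOTPRINT)
    return demo_encrypt(ctx, in, len);
#else
    /* the full, unrolled fast path would live here */
    return demo_encrypt(ctx, in, len);
#endif
}

int main(void)
{
    demo_ctx c;
    unsigned char buf[32] = {0};

    demo_init(&c);
    return demo_encrypt_ctr32(&c, buf, sizeof(buf));
}
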
diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c
index b5ae9ae..5c75c91 100644
--- a/crypto/modes/gcm128.c
+++ b/crypto/modes/gcm128.c
@@ -148,9 +148,7 @@ static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
static const size_t rem_8bit[256] = {
PACK(0x0000), PACK(0x01C2), PACK(0x0384), PACK(0x0246),
PACK(0x0708), PACK(0x06CA), PACK(0x048C), PACK(0x054E),
@@ -319,9 +317,7 @@ static void gcm_init_4bit(u128 Htable[16], u64 H[2])
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
if (is_endian.little)
for (j = 0; j < 16; ++j) {
@@ -354,9 +350,7 @@ static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
nlo = ((const u8 *)Xi)[15];
nhi = nlo >> 4;
@@ -435,9 +429,7 @@ static void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
# if 1
do {
@@ -627,9 +619,7 @@ static void gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
V.hi = H[0]; /* H is in host byte order, no byte swapping */
V.lo = H[1];
@@ -772,9 +762,7 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
memset(ctx, 0, sizeof(*ctx));
ctx->block = block;
@@ -799,6 +787,11 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
#if TABLE_BITS==8
gcm_init_8bit(ctx->Htable, ctx->H.u);
#elif TABLE_BITS==4
+# if defined(GHASH)
+# define CTX__GHASH(f) (ctx->ghash = (f))
+# else
+# define CTX__GHASH(f) (ctx->ghash = NULL)
+# endif
# if defined(GHASH_ASM_X86_OR_64)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
if (OPENSSL_ia32cap_P[0] & (1 << 24) && /* check FXSR bit */
@@ -806,11 +799,11 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
gcm_init_avx(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_avx;
- ctx->ghash = gcm_ghash_avx;
+ CTX__GHASH(gcm_ghash_avx);
} else {
gcm_init_clmul(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_clmul;
- ctx->ghash = gcm_ghash_clmul;
+ CTX__GHASH(gcm_ghash_clmul);
}
return;
}
@@ -823,66 +816,59 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
if (OPENSSL_ia32cap_P[0] & (1 << 23)) { /* check MMX bit */
# endif
ctx->gmult = gcm_gmult_4bit_mmx;
- ctx->ghash = gcm_ghash_4bit_mmx;
+ CTX__GHASH(gcm_ghash_4bit_mmx);
} else {
ctx->gmult = gcm_gmult_4bit_x86;
- ctx->ghash = gcm_ghash_4bit_x86;
+ CTX__GHASH(gcm_ghash_4bit_x86);
}
# else
ctx->gmult = gcm_gmult_4bit;
- ctx->ghash = gcm_ghash_4bit;
+ CTX__GHASH(gcm_ghash_4bit);
# endif
# elif defined(GHASH_ASM_ARM)
# ifdef PMULL_CAPABLE
if (PMULL_CAPABLE) {
gcm_init_v8(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_v8;
- ctx->ghash = gcm_ghash_v8;
+ CTX__GHASH(gcm_ghash_v8);
} else
# endif
# ifdef NEON_CAPABLE
if (NEON_CAPABLE) {
gcm_init_neon(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_neon;
- ctx->ghash = gcm_ghash_neon;
+ CTX__GHASH(gcm_ghash_neon);
} else
# endif
{
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
-# if defined(GHASH)
- ctx->ghash = gcm_ghash_4bit;
-# else
- ctx->ghash = NULL;
-# endif
+ CTX__GHASH(gcm_ghash_4bit);
}
# elif defined(GHASH_ASM_SPARC)
if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) {
gcm_init_vis3(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_vis3;
- ctx->ghash = gcm_ghash_vis3;
+ CTX__GHASH(gcm_ghash_vis3);
} else {
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
- ctx->ghash = gcm_ghash_4bit;
+ CTX__GHASH(gcm_ghash_4bit);
}
# elif defined(GHASH_ASM_PPC)
if (OPENSSL_ppccap_P & PPC_CRYPTO207) {
gcm_init_p8(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_p8;
- ctx->ghash = gcm_ghash_p8;
+ CTX__GHASH(gcm_ghash_p8);
} else {
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
-# if defined(GHASH)
- ctx->ghash = gcm_ghash_4bit;
-# else
- ctx->ghash = NULL;
-# endif
+ CTX__GHASH(gcm_ghash_4bit);
}
# else
gcm_init_4bit(ctx->Htable, ctx->H.u);
# endif
+# undef CTX__GHASH
#endif
}
@@ -892,9 +878,7 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int ctr;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
@@ -1038,9 +1022,7 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
@@ -1048,7 +1030,7 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
# endif
@@ -1098,7 +1080,8 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
break;
# endif
-# if defined(GHASH) && defined(GHASH_CHUNK)
+# if defined(GHASH)
+# if defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
size_t j = GHASH_CHUNK;
@@ -1109,11 +1092,11 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
(*block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-# ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+# endif
else
ctx->Yi.d[3] = ctr;
for (i = 0; i < 16 / sizeof(size_t); ++i)
@@ -1125,6 +1108,7 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
len -= GHASH_CHUNK;
}
+# endif
if ((i = (len & (size_t)-16))) {
size_t j = i;
@@ -1225,9 +1209,7 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
@@ -1235,7 +1217,7 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
# endif
@@ -1284,7 +1266,8 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
break;
# endif
-# if defined(GHASH) && defined(GHASH_CHUNK)
+# if defined(GHASH)
+# if defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
size_t j = GHASH_CHUNK;
@@ -1296,11 +1279,11 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
(*block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-# ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+# endif
else
ctx->Yi.d[3] = ctr;
for (i = 0; i < 16 / sizeof(size_t); ++i)
@@ -1311,6 +1294,7 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
}
len -= GHASH_CHUNK;
}
+# endif
if ((i = (len & (size_t)-16))) {
GHASH(ctx, in, i);
while (len >= 16) {
@@ -1414,23 +1398,24 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
const unsigned char *in, unsigned char *out,
size_t len, ctr128_f stream)
{
+#if defined(OPENSSL_SMALL_FOOTPRINT)
+ return CRYPTO_gcm128_encrypt(ctx, in, out, len);
+#else
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
+# ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+# ifdef GHASH
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
+# endif
# endif
-#endif
mlen += len;
if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
@@ -1444,11 +1429,11 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
}
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
-#else
+# else
ctr = GETU32(ctx->Yi.c + 12);
-#endif
+# endif
else
ctr = ctx->Yi.d[3];
@@ -1466,16 +1451,16 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
return 0;
}
}
-#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
+# if defined(GHASH) && defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
(*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
ctr += GHASH_CHUNK / 16;
if (is_endian.little)
-# ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+# endif
else
ctx->Yi.d[3] = ctr;
GHASH(ctx, out, GHASH_CHUNK);
@@ -1483,43 +1468,43 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
-#endif
+# endif
if ((i = (len & (size_t)-16))) {
size_t j = i / 16;
(*stream) (in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
in += i;
len -= i;
-#if defined(GHASH)
+# if defined(GHASH)
GHASH(ctx, out, i);
out += i;
-#else
+# else
while (j--) {
for (i = 0; i < 16; ++i)
ctx->Xi.c[i] ^= out[i];
GCM_MUL(ctx, Xi);
out += 16;
}
-#endif
+# endif
}
if (len) {
(*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
while (len--) {
@@ -1530,29 +1515,31 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
ctx->mres = n;
return 0;
+#endif
}
int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
const unsigned char *in, unsigned char *out,
size_t len, ctr128_f stream)
{
+#if defined(OPENSSL_SMALL_FOOTPRINT)
+ return CRYPTO_gcm128_decrypt(ctx, in, out, len);
+#else
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
+# ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+# ifdef GHASH
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
+# endif
# endif
-#endif
mlen += len;
if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
@@ -1566,11 +1553,11 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
}
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
-#else
+# else
ctr = GETU32(ctx->Yi.c + 12);
-#endif
+# endif
else
ctr = ctx->Yi.d[3];
@@ -1590,30 +1577,30 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
return 0;
}
}
-#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
+# if defined(GHASH) && defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
GHASH(ctx, in, GHASH_CHUNK);
(*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
ctr += GHASH_CHUNK / 16;
if (is_endian.little)
-# ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+# endif
else
ctx->Yi.d[3] = ctr;
out += GHASH_CHUNK;
in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
-#endif
+# endif
if ((i = (len & (size_t)-16))) {
size_t j = i / 16;
-#if defined(GHASH)
+# if defined(GHASH)
GHASH(ctx, in, i);
-#else
+# else
while (j--) {
size_t k;
for (k = 0; k < 16; ++k)
@@ -1623,15 +1610,15 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
}
j = i / 16;
in -= i;
-#endif
+# endif
(*stream) (in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
out += i;
@@ -1642,11 +1629,11 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
(*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
while (len--) {
@@ -1659,6 +1646,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
ctx->mres = n;
return 0;
+#endif
}
int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
@@ -1667,9 +1655,7 @@ int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
u64 alen = ctx->len.u[0] << 3;
u64 clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT