author    Adhemerval Zanella <adhemerval.zanella@linaro.org>  2023-02-09 10:26:43 -0300
committer Adhemerval Zanella <adhemerval.zanella@linaro.org>  2023-02-17 15:56:41 -0300
commit    11053fd4245c5cc0dbd885122e0ef28d11b2db7e (patch)
tree      b3402a8d187d4fd7edb7da7248323def042b47ff /crypt
parent    609054152fd77c9b572bb04c4af2f8da1ed0c86e (diff)
crypto: Remove _STRING_ARCH_unaligned usage
Assume unaligned inputs in all cases. The code is built and used only in
compat mode.

Checked on x86_64-linux-gnu and i686-linux-gnu.

Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Diffstat (limited to 'crypt')
-rw-r--r--  crypt/md5.c     24
-rw-r--r--  crypt/sha256.c  28
-rw-r--r--  crypt/sha512.c  26
3 files changed, 13 insertions, 65 deletions
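
To illustrate the change the commit message describes, here is a minimal sketch of the buffering pattern the patch keeps: complete blocks are always staged through the context's block-sized buffer with memcpy, so the caller's pointer alignment never matters. The names hash_ctx, process_block and process_bytes are hypothetical stand-ins, not the glibc identifiers.

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for md5_ctx and friends; only the buffering
   pattern from the patch is shown.  */
struct hash_ctx
{
  unsigned char buffer[128];   /* block-sized, naturally aligned scratch */
};

static void
process_block (const void *block, size_t len, struct hash_ctx *ctx)
{
  /* Placeholder for the real compression function, which reads its
     input as 32-bit (or 64-bit) words.  */
  (void) block; (void) len; (void) ctx;
}

/* After the patch, complete blocks are always copied into the aligned
   ctx->buffer before processing, so the _STRING_ARCH_unaligned branch
   and the separate aligned fast path are no longer needed.  */
static void
process_bytes (const void *buffer, size_t len, struct hash_ctx *ctx)
{
  while (len > 64)
    {
      process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
      buffer = (const char *) buffer + 64;
      len -= 64;
    }
  /* The remaining bytes stay buffered for the next call or for
     finish_ctx, as in the original code (not shown).  */
}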
diff --git a/crypt/md5.c b/crypt/md5.c
index c7a232a..03240a9 100644
--- a/crypt/md5.c
+++ b/crypt/md5.c
@@ -229,27 +229,11 @@ md5_process_bytes (const void *buffer, size_t len, struct md5_ctx *ctx)
/* Process available complete blocks. */
if (len >= 64)
{
-#if !_STRING_ARCH_unaligned
-/* To check alignment gcc has an appropriate operator. Other
- compilers don't. */
-# if __GNUC__ >= 2
-# define UNALIGNED_P(p) (((md5_uintptr) p) % __alignof__ (md5_uint32) != 0)
-# else
-# define UNALIGNED_P(p) (((md5_uintptr) p) % sizeof (md5_uint32) != 0)
-# endif
- if (UNALIGNED_P (buffer))
- while (len > 64)
- {
- __md5_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
- buffer = (const char *) buffer + 64;
- len -= 64;
- }
- else
-#endif
+ while (len > 64)
{
- __md5_process_block (buffer, len & ~63, ctx);
- buffer = (const char *) buffer + (len & ~63);
- len &= 63;
+ __md5_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
+ buffer = (const char *) buffer + 64;
+ len -= 64;
}
}
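
For context, the alignment test that this hunk deletes worked as in the following standalone sketch, assuming a GCC-style __alignof__ (the removed code used the glibc-internal md5_uintptr and md5_uint32 typedefs and fell back to sizeof on non-GCC compilers):

#include <stdint.h>
#include <stdio.h>

/* Same test the deleted branch used to decide between the aligned
   fast path and the memcpy loop.  */
#define UNALIGNED_P(p) (((uintptr_t) (p)) % __alignof__ (uint32_t) != 0)

int
main (void)
{
  uint32_t words[2];
  const char *p = (const char *) words;

  /* p is 4-byte aligned, p + 1 is not, so this prints "0 1" on
     typical ABIs; the old code took the memcpy loop only in the
     misaligned case.  */
  printf ("%d %d\n", UNALIGNED_P (p), UNALIGNED_P (p + 1));
  return 0;
}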
diff --git a/crypt/sha256.c b/crypt/sha256.c
index 93b7399..96153d6 100644
--- a/crypt/sha256.c
+++ b/crypt/sha256.c
@@ -120,13 +120,9 @@ __sha256_finish_ctx (struct sha256_ctx *ctx, void *resbuf)
memcpy (&ctx->buffer[bytes], fillbuf, pad);
/* Put the 64-bit file length in *bits* at the end of the buffer. */
-#if _STRING_ARCH_unaligned
- ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
-#else
ctx->buffer32[(bytes + pad + 4) / 4] = SWAP (ctx->total[TOTAL64_low] << 3);
ctx->buffer32[(bytes + pad) / 4] = SWAP ((ctx->total[TOTAL64_high] << 3)
| (ctx->total[TOTAL64_low] >> 29));
-#endif
/* Process last bytes. */
__sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
@@ -169,27 +165,11 @@ __sha256_process_bytes (const void *buffer, size_t len, struct sha256_ctx *ctx)
/* Process available complete blocks. */
if (len >= 64)
{
-#if !_STRING_ARCH_unaligned
-/* To check alignment gcc has an appropriate operator. Other
- compilers don't. */
-# if __GNUC__ >= 2
-# define UNALIGNED_P(p) (((uintptr_t) p) % __alignof__ (uint32_t) != 0)
-# else
-# define UNALIGNED_P(p) (((uintptr_t) p) % sizeof (uint32_t) != 0)
-# endif
- if (UNALIGNED_P (buffer))
- while (len > 64)
- {
- __sha256_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
- buffer = (const char *) buffer + 64;
- len -= 64;
- }
- else
-#endif
+ while (len > 64)
{
- __sha256_process_block (buffer, len & ~63, ctx);
- buffer = (const char *) buffer + (len & ~63);
- len &= 63;
+ __sha256_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
+ buffer = (const char *) buffer + 64;
+ len -= 64;
}
}
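
The finish_ctx hunk keeps only the path that writes the 64-bit bit count as two 32-bit big-endian stores. A minimal sketch of that arithmetic, assuming a little-endian GCC build where SWAP is a 32-bit byte swap (glibc defines SWAP via byteswap.h; store_bit_length, high, low and word_index are hypothetical names):

#include <stddef.h>
#include <stdint.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define SWAP(n) __builtin_bswap32 (n)
#else
# define SWAP(n) (n)
#endif

/* Store the message length in *bits* as a big-endian 64-bit value
   using two 32-bit stores, mirroring the retained sha256.c path.
   high/low stand in for ctx->total[TOTAL64_high]/[TOTAL64_low].  */
static void
store_bit_length (uint32_t *buffer32, size_t word_index,
                  uint32_t high, uint32_t low)
{
  /* Shifting the 64-bit pair (high:low) left by 3 converts bytes to
     bits; the top three bits of low carry into the high word.  */
  buffer32[word_index]     = SWAP ((high << 3) | (low >> 29));
  buffer32[word_index + 1] = SWAP (low << 3);
}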
diff --git a/crypt/sha512.c b/crypt/sha512.c
index d7e51b3..ceabad1 100644
--- a/crypt/sha512.c
+++ b/crypt/sha512.c
@@ -192,28 +192,12 @@ __sha512_process_bytes (const void *buffer, size_t len, struct sha512_ctx *ctx)
/* Process available complete blocks. */
if (len >= 128)
{
-#if !_STRING_ARCH_unaligned
-/* To check alignment gcc has an appropriate operator. Other
- compilers don't. */
-# if __GNUC__ >= 2
-# define UNALIGNED_P(p) (((uintptr_t) p) % __alignof__ (uint64_t) != 0)
-# else
-# define UNALIGNED_P(p) (((uintptr_t) p) % sizeof (uint64_t) != 0)
-# endif
- if (UNALIGNED_P (buffer))
- while (len > 128)
- {
- __sha512_process_block (memcpy (ctx->buffer, buffer, 128), 128,
- ctx);
- buffer = (const char *) buffer + 128;
- len -= 128;
- }
- else
-#endif
+ while (len > 128)
{
- __sha512_process_block (buffer, len & ~127, ctx);
- buffer = (const char *) buffer + (len & ~127);
- len &= 127;
+ __sha512_process_block (memcpy (ctx->buffer, buffer, 128), 128,
+ ctx);
+ buffer = (const char *) buffer + 128;
+ len -= 128;
}
}
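
The deleted aligned fast path in all three files relied on the same mask arithmetic to split the input: len & ~(block_size - 1) is the whole-block prefix handed to the block function, and len & (block_size - 1) is the tail kept for buffering. A tiny standalone example with the SHA-512 block size of 128 bytes (the input length chosen here is arbitrary):

#include <stddef.h>
#include <stdio.h>

int
main (void)
{
  size_t len = 1000;
  size_t whole_blocks = len & ~(size_t) 127;  /* 896 = 7 * 128 */
  size_t tail = len & 127;                    /* 104 */

  printf ("%zu bytes -> %zu in blocks, %zu buffered\n",
          len, whole_blocks, tail);
  return 0;
}

The replacement loop instead peels blocks off one memcpy at a time, reaching a similar split without ever requiring the input pointer to be aligned.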