author		Ard Biesheuvel <ardb@kernel.org>	2022-01-19 10:31:09 +0100
committer	Herbert Xu <herbert@gondor.apana.org.au>	2022-01-31 11:21:44 +1100
commit		cc9341ca44807fa5ca77e93809377f0efb523d1f (patch)
tree		db58593fbb58cdfdf9c4381e7c123a8114c368f0
parent		9da42763d07966912c2c6eeed23a75f0c0342f15 (diff)
download	linux-crypto-cc9341ca44807fa5ca77e93809377f0efb523d1f.tar.gz
		linux-crypto-cc9341ca44807fa5ca77e93809377f0efb523d1f.zip
crypto: memneq - avoid implicit unaligned accesses
The C standard does not support dereferencing pointers that are not
aligned with respect to the pointed-to type, and doing so is technically
undefined behavior, even if the underlying hardware supports it.

This means that conditionally dereferencing such pointers when
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y is set is not the right thing
to do, and it actually results in alignment faults on ARM, which are
fixed up on a slow path. Instead, we should use the unaligned accessors
in such cases: on architectures that don't care about alignment, they
result in identical codegen, whereas codegen on ARM, for example, will
avoid doubleword loads and stores and use ordinary ones instead, which
are able to tolerate misalignment.

Link: https://lore.kernel.org/linux-crypto/CAHk-=wiKkdYLY0bv+nXrcJz3NH9mAqPAafX7PpW5EwVtxsEu7Q@mail.gmail.com/
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
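Background sketch (not part of the patch): the kernel's get_unaligned()
behaves much like a memcpy() into a local variable, which is
well-defined at any alignment. A minimal userspace analogue follows,
with the hypothetical helper load_ulong_unaligned() standing in for
get_unaligned():

#include <stdio.h>
#include <string.h>

/* Read an unsigned long from a possibly misaligned address.
 * memcpy() carries no alignment assumption, so the compiler is free
 * to emit byte loads, or a single word load on targets where a
 * misaligned load is safe. */
static unsigned long load_ulong_unaligned(const void *p)
{
	unsigned long v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[sizeof(unsigned long) + 1] = { 0 };

	buf[1] = 0x2a;

	/* *(unsigned long *)(buf + 1) would be undefined behavior here;
	 * the accessor below is well-defined and prints 2a on a
	 * little-endian machine. */
	printf("%lx\n", load_ulong_unaligned(buf + 1));
	return 0;
}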
-rw-r--r--	crypto/memneq.c	22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/crypto/memneq.c b/crypto/memneq.c
index afed1bd1..fb11608b 100644
--- a/crypto/memneq.c
+++ b/crypto/memneq.c
@@ -60,6 +60,7 @@
  */
 
 #include <crypto/algapi.h>
+#include <asm/unaligned.h>
 
 #ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
 
@@ -71,7 +72,8 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 	while (size >= sizeof(unsigned long)) {
-		neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+		neq |= get_unaligned((unsigned long *)a) ^
+		       get_unaligned((unsigned long *)b);
 		OPTIMIZER_HIDE_VAR(neq);
 		a += sizeof(unsigned long);
 		b += sizeof(unsigned long);
@@ -95,18 +97,24 @@ static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
 
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 	if (sizeof(unsigned long) == 8) {
-		neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b);
+		neq |= get_unaligned((unsigned long *)a) ^
+		       get_unaligned((unsigned long *)b);
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);
+		neq |= get_unaligned((unsigned long *)(a + 8)) ^
+		       get_unaligned((unsigned long *)(b + 8));
 		OPTIMIZER_HIDE_VAR(neq);
 	} else if (sizeof(unsigned int) == 4) {
-		neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b);
+		neq |= get_unaligned((unsigned int *)a) ^
+		       get_unaligned((unsigned int *)b);
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4);
+		neq |= get_unaligned((unsigned int *)(a + 4)) ^
+		       get_unaligned((unsigned int *)(b + 4));
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8);
+		neq |= get_unaligned((unsigned int *)(a + 8)) ^
+		       get_unaligned((unsigned int *)(b + 8));
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12);
+		neq |= get_unaligned((unsigned int *)(a + 12)) ^
+		       get_unaligned((unsigned int *)(b + 12));
 		OPTIMIZER_HIDE_VAR(neq);
 	} else
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
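Usage sketch (not from this patch; the helper mac_is_valid() is
hypothetical, while crypto_memneq() is the real inline wrapper in
crypto/algapi.h around the __crypto_memneq() this file implements):
secret values such as authentication tags are compared with
crypto_memneq() rather than memcmp(), since memcmp() may return as soon
as a byte differs and thereby leak timing information.

#include <linux/types.h>
#include <crypto/algapi.h>

/* Compare a computed MAC against the one received on the wire.
 * crypto_memneq() returns 0 iff the buffers are equal, and its
 * runtime does not depend on where (or whether) they differ. */
static bool mac_is_valid(const u8 *computed, const u8 *received,
			 size_t len)
{
	return crypto_memneq(computed, received, len) == 0;
}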