author     Jakub Jelinek <jakub@redhat.com>   2021-01-26 14:48:26 +0100
committer  Jakub Jelinek <jakub@redhat.com>   2021-01-26 14:48:26 +0100
commit     17ad8cdebe65b47d257d85849747b806af0a85fd (patch)
tree       0a88645a3bf554c74155b9c0d09cfccc8525e394
parent     5bbc80d0e40b6770b61c471ab40a839e255618f9 (diff)
aarch64: Tighten up checks for ubfiz [PR98681]
The testcase in the patch doesn't assemble, because the instruction requires that the penultimate operand (lsb) is in the range [0, 32] (or [0, 64]) and the last operand is in the range [1, 32 - lsb] (or [1, 64 - lsb]).

The INTVAL (shft_amnt) < GET_MODE_BITSIZE (mode) check will accept an lsb operand in the range [MIN, 32] (or [MIN, 64]), so we invoke undefined behaviour in the compiler and sometimes the bogus instruction makes it through to the assembler.

The patch changes all the INTVAL uses in that function to UINTVAL, which isn't strictly necessary but can be done (e.g. after the UINTVAL (shft_amnt) < GET_MODE_BITSIZE (mode) check we know it is not negative, and thus INTVAL (shft_amnt) and UINTVAL (shft_amnt) behave the same).  But I had to add an INTVAL (mask) > 0 check in that case, otherwise we risk (hypothetically) emitting an instruction that doesn't assemble.

The problem is with masks that have the MSB set.  The instruction itself can handle those, e.g.
  ubfiz w1, w0, 13, 19
will do
  (w0 << 13) & 0xffffe000
but in RTL we represent SImode constants with the MSB set as negative HOST_WIDE_INTs, so the mask is actually
  HOST_WIDE_INT_C (0xffffffffffffe000)
and the instruction uses %P3 to print the last operand, which calls
  asm_fprintf (f, "%u", popcount_hwi (INTVAL (x)))
to print it.  That prints 51 rather than 19, because it also counts all the copies of the sign bit.

Not supporting masks with the MSB set isn't a big loss though; they really shouldn't appear normally, as both GIMPLE and RTL optimizations should optimize them away (such a mask doesn't mask off any bits, so just w0 << 13 will do too).

2021-01-26  Jakub Jelinek  <jakub@redhat.com>

	PR target/98681
	* config/aarch64/aarch64.c (aarch64_mask_and_shift_for_ubfiz_p):
	Use UINTVAL (shft_amnt) and UINTVAL (mask) instead of
	INTVAL (shft_amnt) and INTVAL (mask).  Add && INTVAL (mask) > 0
	condition.

	* gcc.c-torture/execute/pr98681.c: New test.
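To make the popcount mismatch described above concrete, here is a minimal standalone sketch (not part of the patch); it assumes __builtin_popcountll as a stand-in for popcount_hwi and long long as a stand-in for HOST_WIDE_INT.

#include <stdio.h>

int
main (void)
{
  /* SImode mask 0xffffe000 (bits 13..31, i.e. width 19) as the sign-extended
     64-bit constant HOST_WIDE_INT_C (0xffffffffffffe000) would denote;
     -0x2000LL has exactly that bit pattern.  */
  long long sign_extended_mask = -0x2000LL;

  /* Counting bits of the sign-extended value also counts the copies of the
     sign bit: prints 51, not the intended width 19.  */
  printf ("%d\n", __builtin_popcountll (sign_extended_mask));

  /* Counting bits of the zero-extended 32-bit mask prints the intended 19.  */
  printf ("%d\n", __builtin_popcountll (0xffffe000ULL));
  return 0;
}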
-rw-r--r--  gcc/config/aarch64/aarch64.c                    9
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/pr98681.c   18
2 files changed, 23 insertions, 4 deletions
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index c19dc6c..b6192e5 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -12060,10 +12060,11 @@ aarch64_mask_and_shift_for_ubfiz_p (scalar_int_mode mode, rtx mask,
rtx shft_amnt)
{
return CONST_INT_P (mask) && CONST_INT_P (shft_amnt)
- && INTVAL (shft_amnt) < GET_MODE_BITSIZE (mode)
- && exact_log2 ((INTVAL (mask) >> INTVAL (shft_amnt)) + 1) >= 0
- && (INTVAL (mask)
- & ((HOST_WIDE_INT_1U << INTVAL (shft_amnt)) - 1)) == 0;
+ && INTVAL (mask) > 0
+ && UINTVAL (shft_amnt) < GET_MODE_BITSIZE (mode)
+ && exact_log2 ((UINTVAL (mask) >> UINTVAL (shft_amnt)) + 1) >= 0
+ && (UINTVAL (mask)
+ & ((HOST_WIDE_INT_1U << UINTVAL (shft_amnt)) - 1)) == 0;
}

/* Return true if the masks and a shift amount from an RTX of the form
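The effect of the tightened condition can be sketched in plain C (illustrative only, not GCC source; ubfiz_ok_p is a made-up helper name, long long stands in for HOST_WIDE_INT, and __builtin_popcountll (v) == 1 stands in for exact_log2 (v) >= 0):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the updated check; 'bitsize' plays the role of
   GET_MODE_BITSIZE (mode).  */
static bool
ubfiz_ok_p (long long mask, long long shft_amnt, unsigned bitsize)
{
  return mask > 0                                      /* reject MSB-set masks */
         && (unsigned long long) shft_amnt < bitsize   /* unsigned compare rejects a negative lsb */
         /* mask >> lsb must be a contiguous run of low bits (power of two minus 1) */
         && __builtin_popcountll (((unsigned long long) mask >> shft_amnt) + 1) == 1
         /* no mask bits below the lsb */
         && ((unsigned long long) mask & ((1ULL << shft_amnt) - 1)) == 0;
}

int
main (void)
{
  printf ("%d\n", ubfiz_ok_p (0x7ffe000LL, 13, 32));   /* 1: lsb 13, width 14 */
  printf ("%d\n", ubfiz_ok_p (0x7ffe000LL, -64, 32));  /* 0: negative lsb now rejected */
  printf ("%d\n", ubfiz_ok_p (-0x2000LL, 13, 32));     /* 0: sign-extended 0xffffe000 rejected */
  return 0;
}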
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr98681.c b/gcc/testsuite/gcc.c-torture/execute/pr98681.c
new file mode 100644
index 0000000..a256881
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr98681.c
@@ -0,0 +1,18 @@
+/* PR target/98681 */
+
+__attribute__((noipa)) int
+foo (int x)
+{
+  if (x > 32)
+    return (x << -64) & 255;
+  else
+    return x;
+}
+
+int
+main ()
+{
+  if (foo (32) != 32 || foo (-150) != -150)
+    __builtin_abort ();
+  return 0;
+}