author     Jakub Jelinek <jakub@redhat.com>    2023-12-08 09:02:15 +0100
committer  Jakub Jelinek <jakub@redhat.com>    2023-12-08 09:02:15 +0100
commit     b5cfbb8f4cceb621d6812eec3e0fb876b648241c (patch)
tree       49a108e298046a33cbfa9acac6a8c215cbcf16e1
parent     8f60f5499e10d19218cada082e0909516ebf0e74 (diff)
vr-values: Avoid ICEs on large _BitInt cast to floating point [PR112901]
For casts from integers to floating point,
simplify_float_conversion_using_ranges uses SCALAR_INT_TYPE_MODE and
queries optabs for the optimization it wants to make.

That doesn't work for large/huge BITINT_TYPE: those have BLKmode, which
is not a scalar integer mode, and querying an optab is not useful for
them either.  I think it is best to just skip this optimization for
those bitints; after all, bitint lowering already uses ranges to
determine the minimum precision of bitint operands of integer-to-float
casts.

2023-12-08  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/112901
	* vr-values.cc
	(simplify_using_ranges::simplify_float_conversion_using_ranges):
	Return false if rhs1 has BITINT_TYPE type with BLKmode TYPE_MODE.

	* gcc.dg/bitint-51.c: New test.
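For context, the conversions this function handles look like the sketch
below: an integer-valued operand (here a 256-bit _BitInt, which has
BLKmode on current targets) converted to a floating-point type.  This is
only an illustrative sketch, not the committed reproducer (that is
gcc.dg/bitint-51.c in the diff below); it assumes a target where
__BITINT_MAXWIDTH__ is at least 256, such as x86-64 with a bitint-capable
GCC.  Whether a particular conversion reaches the affected code path
depends on what range information the pass can compute for the operand,
which is why the committed test multiplies the operand by 4 first.

/* Illustrative sketch only (not the committed test): a cast from a large
   _BitInt to a floating-point type.  On targets where _BitInt(256) has
   BLKmode, SCALAR_INT_TYPE_MODE cannot be applied to its type, which is
   exactly the situation the new guard in vr-values.cc bails out on.  */
#if __BITINT_MAXWIDTH__ >= 256
double
to_double (_BitInt(256) x)
{
  return (double) (x * 4);	/* integer-to-float conversion of a _BitInt */
}
#endif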
-rw-r--r--  gcc/testsuite/gcc.dg/bitint-51.c  14
-rw-r--r--  gcc/vr-values.cc                   5
2 files changed, 19 insertions, 0 deletions
diff --git a/gcc/testsuite/gcc.dg/bitint-51.c b/gcc/testsuite/gcc.dg/bitint-51.c
new file mode 100644
index 0000000..0574070
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-51.c
@@ -0,0 +1,14 @@
+/* PR tree-optimization/112901 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2" } */
+
+float f;
+#if __BITINT_MAXWIDTH__ >= 256
+_BitInt(256) i;
+
+void
+foo (void)
+{
+  f *= 4 * i;
+}
+#endif
diff --git a/gcc/vr-values.cc b/gcc/vr-values.cc
index ecb2941..7115389 100644
--- a/gcc/vr-values.cc
+++ b/gcc/vr-values.cc
@@ -1656,6 +1656,11 @@ simplify_using_ranges::simplify_float_conversion_using_ranges
       || vr.undefined_p ())
     return false;
+  /* The code below doesn't work for large/huge _BitInt, nor is really
+     needed for those, bitint lowering does use ranges already.  */
+  if (TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE
+      && TYPE_MODE (TREE_TYPE (rhs1)) == BLKmode)
+    return false;
   /* First check if we can use a signed type in place of an unsigned.  */
   scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
   if (TYPE_UNSIGNED (TREE_TYPE (rhs1))