author     Jakub Jelinek <jakub@redhat.com>   2021-09-01 13:30:51 +0200
committer  Jakub Jelinek <jakub@redhat.com>   2021-09-01 13:40:43 +0200
commit     bea07159d1d4c9a61c8f7097e9f88c2b206b1b2f (patch)
tree       9df5780b3d76c871dc729f73b17152c3fcb2a76f /gcc/tree-vect-patterns.c
parent     a61623d9b38d28048ccbd397d4ccf52c2dfb3fed (diff)
vectorizer: Fix up vectorization using WIDEN_MINUS_EXPR [PR102124]
The following testcase is miscompiled on aarch64-linux at -O3 since the
introduction of WIDEN_MINUS_EXPR.  The problem appears if the inner type
(half_type) is unsigned and the result type in which the subtraction is
performed (type) has precision more than twice as large as the inner
type's precision.  For other widening operations like
WIDEN_{PLUS,MULT}_EXPR, if half_type is unsigned, the
addition/multiplication result in itype is also unsigned and needs to be
zero-extended to type.  But subtraction is special: even when half_type
is unsigned, the subtraction behaves as signed (regardless of whether
the result type is signed or unsigned), 0xfeU - 0xffU is -1 or
0xffffffffU, not 0x0000ffff.

I think it is better not to use mixed signedness of types in
WIDEN_MINUS_EXPR (have an unsigned vector of operands and a signed
result vector), so this patch instead adds another cast to make sure we
always sign-extend the result from itype to type if type is wider than
itype.

2021-09-01  Jakub Jelinek  <jakub@redhat.com>

        PR tree-optimization/102124
        * tree-vect-patterns.c (vect_recog_widen_op_pattern): For
        ORIG_CODE MINUS_EXPR, if itype is unsigned with smaller
        precision than type, add an extra cast to signed variant
        of itype to ensure sign-extension.

        * gcc.dg/torture/pr102124.c: New test.
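To make the failure mode concrete, here is a minimal sketch of the kind
of loop that trips the bug (an assumed shape, not the committed
gcc.dg/torture/pr102124.c testcase; the function name is made up).
half_type is unsigned char, the widened subtraction is performed in
unsigned short (itype), and the accumulator type int (type) is more
than twice as wide:

    /* Sketch only: with the bug, when a[i] < b[i] the 16-bit widened
       difference was zero-extended to int, so a contribution of e.g.
       -1 became 0xffff instead.  */
    int
    sum_of_differences (const unsigned char *a, const unsigned char *b, int n)
    {
      int sum = 0;
      for (int i = 0; i < n; i++)
        sum += a[i] - b[i];    /* computed in int per C's usual promotions */
      return sum;
    }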
Diffstat (limited to 'gcc/tree-vect-patterns.c')
-rw-r--r--   gcc/tree-vect-patterns.c   26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 8997340..e6c5bcd 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -1268,11 +1268,31 @@ vect_recog_widen_op_pattern (vec_info *vinfo,
   /* Check target support  */
   tree vectype = get_vectype_for_scalar_type (vinfo, half_type);
   tree vecitype = get_vectype_for_scalar_type (vinfo, itype);
+  tree ctype = itype;
+  tree vecctype = vecitype;
+  if (orig_code == MINUS_EXPR
+      && TYPE_UNSIGNED (itype)
+      && TYPE_PRECISION (type) > TYPE_PRECISION (itype))
+    {
+      /* Subtraction is special, even if half_type is unsigned and no matter
+         whether type is signed or unsigned, if type is wider than itype,
+         we need to sign-extend from the widening operation result to the
+         result type.
+         Consider half_type unsigned char, operand 1 0xfe, operand 2 0xff,
+         itype unsigned short and type either int or unsigned int.
+         Widened (unsigned short) 0xfe - (unsigned short) 0xff is
+         (unsigned short) 0xffff, but for type int we want the result -1
+         and for type unsigned int 0xffffffff rather than 0xffff.  */
+      ctype = build_nonstandard_integer_type (TYPE_PRECISION (itype), 0);
+      vecctype = get_vectype_for_scalar_type (vinfo, ctype);
+    }
+
   enum tree_code dummy_code;
   int dummy_int;
   auto_vec<tree> dummy_vec;
   if (!vectype
       || !vecitype
+      || !vecctype
       || !supportable_widening_operation (vinfo, wide_code, last_stmt_info,
                                           vecitype, vectype,
                                           &dummy_code, &dummy_code,
@@ -1291,8 +1311,12 @@ vect_recog_widen_op_pattern (vec_info *vinfo,
   gimple *pattern_stmt = gimple_build_assign (var, wide_code,
                                               oprnd[0], oprnd[1]);
 
+  if (vecctype != vecitype)
+    pattern_stmt = vect_convert_output (vinfo, last_stmt_info, ctype,
+                                        pattern_stmt, vecitype);
+
   return vect_convert_output (vinfo, last_stmt_info,
-                              type, pattern_stmt, vecitype);
+                              type, pattern_stmt, vecctype);
 }
 
 /* Try to detect multiplication on widened inputs, converting MULT_EXPR
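The effect of the extra cast is easiest to see in scalar terms.  Below
is a small, self-contained C illustration of why converting through the
signed variant of itype (ctype in the patch) yields the required
sign-extension; the variable names wres/wrong/right are mine, not from
the patch:

    #include <stdio.h>

    int
    main (void)
    {
      /* itype is unsigned short here; the widened subtraction wraps.  */
      unsigned short wres = (unsigned short) 0xfe - (unsigned short) 0xff;  /* 0xffff */

      /* Buggy path: zero-extend itype straight to type.  */
      unsigned int wrong = wres;                /* 0x0000ffff */

      /* Fixed path: go through the signed variant of itype, so the
         final widening to type sign-extends.  (The unsigned-to-signed
         conversion of 0xffff is implementation-defined before C23,
         but yields -1 on GCC targets.)  */
      short sres = (short) wres;                /* -1 */
      unsigned int right = (unsigned int) sres; /* 0xffffffff */

      printf ("%#x %#x\n", wrong, right);       /* prints 0xffff 0xffffffff */
      return 0;
    }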