author    Andrew Stubbs <ams@baylibre.com>    2024-03-15 14:21:15 +0000
committer Andrew Stubbs <ams@baylibre.com>    2024-03-22 14:14:00 +0000
commit    e4e02c07d93559a037608c73e8153549b5104fbb (patch)
tree      729ef1c0ff9af25e6306910dc55dfb4197ece3e3 /gcc
parent    a364148530c28645ce87adbc58a66c9f32a325ab (diff)
vect: more oversized bitmask fixups
These patches fix up a failure in the testcase vect/tsvc/vect-tsvc-s278.c
when configured to use V32 instead of V64 (I plan to do this for RDNA
devices).

The problem was that a "not" operation on the mask inadvertently enabled
inactive lanes 32-63 and corrupted the output.  The fix is to adjust the
mask when calling internal functions (in this case COND_MINUS), when doing
masked loads and stores, and when doing conditional jumps (some cases were
already handled).

gcc/ChangeLog:

	* dojump.cc (do_compare_rtx_and_jump): Clear excess bits in vector
	bitmasks.
	(do_compare_and_jump): Remove now-redundant similar code.
	* internal-fn.cc (expand_fn_using_insn): Clear excess bits in vector
	bitmasks.
	(add_mask_and_len_args): Likewise.
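[Editor's illustration, not part of the commit: a minimal, self-contained C
sketch of the failure mode and the fix, assuming a hypothetical 32-lane
boolean vector mask held in a 64-bit scalar integer mode, as in the V32
configuration described above.  The lane count and mask value are made up.]

/* Sketch of the bug: a V32 mask stored in a 64-bit scalar integer mode,
   where only the low 32 bits correspond to real vector lanes.  */
#include <inttypes.h>
#include <stdio.h>

int
main (void)
{
  const int nunits = 32;                /* active vector lanes (assumed) */
  uint64_t mask = 0x00000000ffff0000u;  /* lanes 16-31 enabled */

  /* A bitwise "not" of the mask also sets the padding bits 32-63, which
     hardware with a 64-lane execution mask would treat as active lanes.  */
  uint64_t inverted = ~mask;
  printf ("broken: %016" PRIx64 "\n", inverted);

  /* The fix: AND with ((1 << nunits) - 1) to clear the excess bits,
     mirroring the expand_binop/and_optab calls added by the patch.  */
  uint64_t fixed = inverted & ((UINT64_C (1) << nunits) - 1);
  printf ("fixed:  %016" PRIx64 "\n", fixed);
  return 0;
}

[Run as written, this prints ffffffff0000ffff and then 000000000000ffff:
after clearing, only the real lanes 0-15 remain enabled by the negation.]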
Diffstat (limited to 'gcc')
-rw-r--r--	gcc/dojump.cc	34
-rw-r--r--	gcc/internal-fn.cc	26
2 files changed, 44 insertions, 16 deletions
diff --git a/gcc/dojump.cc b/gcc/dojump.cc
index 88600cb..5f74b69 100644
--- a/gcc/dojump.cc
+++ b/gcc/dojump.cc
@@ -1235,6 +1235,24 @@ do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
}
}
+ /* For boolean vectors with less than mode precision
+ make sure to fill padding with consistent values. */
+ if (val
+ && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (val))
+ && SCALAR_INT_MODE_P (mode))
+ {
+ auto nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (val)).to_constant ();
+ if (maybe_ne (GET_MODE_PRECISION (mode), nunits))
+ {
+ op0 = expand_binop (mode, and_optab, op0,
+ GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
+ NULL_RTX, true, OPTAB_WIDEN);
+ op1 = expand_binop (mode, and_optab, op1,
+ GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
+ NULL_RTX, true, OPTAB_WIDEN);
+ }
+ }
+
emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp, val,
if_true_label, prob);
}
@@ -1266,7 +1284,6 @@ do_compare_and_jump (tree treeop0, tree treeop1, enum rtx_code signed_code,
machine_mode mode;
int unsignedp;
enum rtx_code code;
- unsigned HOST_WIDE_INT nunits;
/* Don't crash if the comparison was erroneous. */
op0 = expand_normal (treeop0);
@@ -1309,21 +1326,6 @@ do_compare_and_jump (tree treeop0, tree treeop1, enum rtx_code signed_code,
emit_insn (targetm.gen_canonicalize_funcptr_for_compare (new_op1, op1));
op1 = new_op1;
}
- /* For boolean vectors with less than mode precision
- make sure to fill padding with consistent values. */
- else if (VECTOR_BOOLEAN_TYPE_P (type)
- && SCALAR_INT_MODE_P (mode)
- && TYPE_VECTOR_SUBPARTS (type).is_constant (&nunits)
- && maybe_ne (GET_MODE_PRECISION (mode), nunits))
- {
- gcc_assert (code == EQ || code == NE);
- op0 = expand_binop (mode, and_optab, op0,
- GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1), NULL_RTX,
- true, OPTAB_WIDEN);
- op1 = expand_binop (mode, and_optab, op1,
- GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1), NULL_RTX,
- true, OPTAB_WIDEN);
- }
do_compare_rtx_and_jump (op0, op1, code, unsignedp, treeop0, mode,
((mode == BLKmode)
diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
index fcf47c7..5269f0a 100644
--- a/gcc/internal-fn.cc
+++ b/gcc/internal-fn.cc
@@ -245,6 +245,18 @@ expand_fn_using_insn (gcall *stmt, insn_code icode, unsigned int noutputs,
&& SSA_NAME_IS_DEFAULT_DEF (rhs)
&& VAR_P (SSA_NAME_VAR (rhs)))
create_undefined_input_operand (&ops[opno], TYPE_MODE (rhs_type));
+ else if (VECTOR_BOOLEAN_TYPE_P (rhs_type)
+ && SCALAR_INT_MODE_P (TYPE_MODE (rhs_type))
+ && maybe_ne (GET_MODE_PRECISION (TYPE_MODE (rhs_type)),
+ TYPE_VECTOR_SUBPARTS (rhs_type).to_constant ()))
+ {
+ /* Ensure that the vector bitmasks do not have excess bits. */
+ int nunits = TYPE_VECTOR_SUBPARTS (rhs_type).to_constant ();
+ rtx tmp = expand_binop (TYPE_MODE (rhs_type), and_optab, rhs_rtx,
+ GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
+ NULL_RTX, true, OPTAB_WIDEN);
+ create_input_operand (&ops[opno], tmp, TYPE_MODE (rhs_type));
+ }
else
create_input_operand (&ops[opno], rhs_rtx, TYPE_MODE (rhs_type));
opno += 1;
@@ -312,6 +324,20 @@ add_mask_and_len_args (expand_operand *ops, unsigned int opno, gcall *stmt)
{
tree mask = gimple_call_arg (stmt, mask_index);
rtx mask_rtx = expand_normal (mask);
+
+ tree mask_type = TREE_TYPE (mask);
+ if (VECTOR_BOOLEAN_TYPE_P (mask_type)
+ && SCALAR_INT_MODE_P (TYPE_MODE (mask_type))
+ && maybe_ne (GET_MODE_PRECISION (TYPE_MODE (mask_type)),
+ TYPE_VECTOR_SUBPARTS (mask_type).to_constant ()))
+ {
+ /* Ensure that the vector bitmasks do not have excess bits. */
+ int nunits = TYPE_VECTOR_SUBPARTS (mask_type).to_constant ();
+ mask_rtx = expand_binop (TYPE_MODE (mask_type), and_optab, mask_rtx,
+ GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
+ NULL_RTX, true, OPTAB_WIDEN);
+ }
+
create_input_operand (&ops[opno++], mask_rtx,
TYPE_MODE (TREE_TYPE (mask)));
}