author     Richard Sandiford <richard.sandiford@arm.com>  2019-08-15 08:05:50 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>       2019-08-15 08:05:50 +0000
commit     20103c0ea9336d2b5286eb7f2605ace3fd49a431 (patch)
tree       0ba3460062f73c67ed90ea27a6520fa537c29f69 /gcc/config
parent     cc8495056efac06fa57d36512903b06026c01470 (diff)
Add support for conditional shifts
This patch adds support for IFN_COND shifts left and shifts right.
This is mostly mechanical, but since we try to handle conditional
operations in the same way as unconditional operations in match.pd,
we need to support IFN_COND shifts by scalars as well as vectors.
E.g.:

   IFN_COND_SHL (cond, a, { 1, 1, ... }, fallback)

and:

   IFN_COND_SHL (cond, a, 1, fallback)

are the same operation, with:

   (for shiftrotate (lrotate rrotate lshift rshift)
    ...
    /* Prefer vector1 << scalar to vector1 << vector2
       if vector2 is uniform.  */
    (for vec (VECTOR_CST CONSTRUCTOR)
     (simplify
      (shiftrotate @0 vec@1)
      (with { tree tem = uniform_vector_p (@1); }
       (if (tem)
        (shiftrotate @0 { tem; }))))))

preferring the latter.  The patch copes with this by extending
create_convert_operand_from to handle scalar-to-vector conversions.

2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
            Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>

gcc/
        * internal-fn.def (IFN_COND_SHL, IFN_COND_SHR): New internal
        functions.
        * internal-fn.c (FOR_EACH_CODE_MAPPING): Handle shifts.
        * match.pd (UNCOND_BINARY, COND_BINARY): Likewise.
        * optabs.def (cond_ashl_optab, cond_ashr_optab, cond_lshr_optab):
        New optabs.
        * optabs.h (create_convert_operand_from): Expand comment.
        * optabs.c (maybe_legitimize_operand): Allow implicit broadcasts
        when mapping scalar rtxes to vector operands.
        * config/aarch64/iterators.md (SVE_INT_BINARY): Add ashift,
        ashiftrt and lshiftrt.
        (sve_int_op, sve_int_op_rev, sve_pred_int_rhs2_operand): Handle
        them.
        * config/aarch64/aarch64-sve.md (*cond_<optab><mode>_2_const)
        (*cond_<optab><mode>_any_const): New patterns.

gcc/testsuite/
        * gcc.target/aarch64/sve/cond_shift_1.c: New test.
        * gcc.target/aarch64/sve/cond_shift_1_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_2.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_2_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_3.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_3_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_4.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_4_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_5.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_5_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_6.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_6_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_7.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_7_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_8.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_8_run.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_9.c: Likewise.
        * gcc.target/aarch64/sve/cond_shift_9_run.c: Likewise.

Co-Authored-By: Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>

From-SVN: r274505
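For context, the kind of source loop these internal functions come from
(a hand-written sketch in the spirit of the new cond_shift_*.c tests,
not code copied from them):

   /* After if-conversion, the shift becomes an IFN_COND_SHL whose
      fallback value is the old a[i], so active lanes are shifted and
      inactive lanes are left unchanged, which is what the merging
      form of the SVE LSL instruction provides.  */
   void
   f (int *restrict a, int *restrict b, int n)
   {
     for (int i = 0; i < n; ++i)
       if (b[i] > 0)
         a[i] = a[i] << 1;
   }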
Diffstat (limited to 'gcc/config')
 gcc/config/aarch64/aarch64-sve.md | 46
 gcc/config/aarch64/iterators.md   | 32
 2 files changed, 67 insertions(+), 11 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 43beb7e..b8156a1 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1772,7 +1772,10 @@
;; Includes:
;; - ADD (merging form only)
;; - AND (merging form only)
+;; - ASR (merging form only)
;; - EOR (merging form only)
+;; - LSL (merging form only)
+;; - LSR (merging form only)
;; - MUL
;; - ORR (merging form only)
;; - SMAX
@@ -2405,6 +2408,49 @@
"<shift>\t%0.<Vetype>, %1.<Vetype>, #%2"
)
+;; Predicated integer shift, merging with the first input.
+(define_insn "*cond_<optab><mode>_2_const"
+  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+        (unspec:SVE_I
+          [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+           (ASHIFT:SVE_I
+             (match_operand:SVE_I 2 "register_operand" "0, w")
+             (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
+           (match_dup 2)]
+          UNSPEC_SEL))]
+  "TARGET_SVE"
+  "@
+   <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+   movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
+  [(set_attr "movprfx" "*,yes")]
+)
+
+;; Predicated integer shift, merging with an independent value.
+(define_insn_and_rewrite "*cond_<optab><mode>_any_const"
+  [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+        (unspec:SVE_I
+          [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+           (ASHIFT:SVE_I
+             (match_operand:SVE_I 2 "register_operand" "w, w, w")
+             (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
+           (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+          UNSPEC_SEL))]
+  "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
+  "@
+   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+   #"
+  "&& reload_completed
+   && register_operand (operands[4], <MODE>mode)
+   && !rtx_equal_p (operands[0], operands[4])"
+  {
+    emit_insn (gen_vcond_mask_<mode><vpred> (operands[0], operands[2],
+                                             operands[4], operands[1]));
+    operands[4] = operands[2] = operands[0];
+  }
+  [(set_attr "movprfx" "yes")]
+)
+
;; -------------------------------------------------------------------------
;; ---- [FP] General binary arithmetic corresponding to rtx codes
;; -------------------------------------------------------------------------
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 3642ba1..81dddb7 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -1280,6 +1280,7 @@
;; SVE integer binary operations.
(define_code_iterator SVE_INT_BINARY [plus minus mult smax umax smin umin
+                                      ashift ashiftrt lshiftrt
                                       and ior xor])
;; SVE integer binary division operations.
@@ -1475,6 +1476,9 @@
(smax "smax")
(umin "umin")
(umax "umax")
+ (ashift "lsl")
+ (ashiftrt "asr")
+ (lshiftrt "lsr")
(and "and")
(ior "orr")
(xor "eor")
@@ -1484,17 +1488,20 @@
(popcount "cnt")])
(define_code_attr sve_int_op_rev [(plus "add")
- (minus "subr")
- (mult "mul")
- (div "sdivr")
- (udiv "udivr")
- (smin "smin")
- (smax "smax")
- (umin "umin")
- (umax "umax")
- (and "and")
- (ior "orr")
- (xor "eor")])
+ (minus "subr")
+ (mult "mul")
+ (div "sdivr")
+ (udiv "udivr")
+ (smin "smin")
+ (smax "smax")
+ (umin "umin")
+ (umax "umax")
+ (ashift "lslr")
+ (ashiftrt "asrr")
+ (lshiftrt "lsrr")
+ (and "and")
+ (ior "orr")
+ (xor "eor")])
;; The floating-point SVE instruction that implements an rtx code.
(define_code_attr sve_fp_op [(plus "fadd")
@@ -1535,6 +1542,9 @@
(umax "register_operand")
(smin "register_operand")
(umin "register_operand")
+ (ashift "aarch64_sve_lshift_operand")
+ (ashiftrt "aarch64_sve_rshift_operand")
+ (lshiftrt "aarch64_sve_rshift_operand")
(and "aarch64_sve_pred_and_operand")
(ior "register_operand")
(xor "register_operand")])