author     Richard Sandiford <richard.sandiford@arm.com>  2019-07-12 08:14:34 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>       2019-07-12 08:14:34 +0000
commit     e7ba492a04d0bfef9752cbb16fcce3ffc31bf99f (patch)
tree       beab0d611bc7664962aa004ab9250ec833045ae6 /gcc
parent     d281492de84960b5885f88fffeeb226650f5141d (diff)
[aarch64] Fix ambiguous .md attribute uses
This patch is part of a series that fixes ambiguous attribute uses in
.md files, i.e. cases in which attributes didn't use <ITER:ATTR> to
specify an iterator, and in which <ATTR> could have different values
depending on the iterator chosen.  No behavioural change except for
dropping the unused SVE divide permutations.

2019-07-12  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* config/aarch64/aarch64.md (*compare_condjump<mode>)
	(loadwb_pair<GPI:mode>_<P:mode>, loadwb_pair<GPF:mode>_<P:mode>)
	(storewb_pair<GPI:mode>_<P:mode>, storewb_pair<GPF:mode>_<P:mode>)
	(*ands<mode>_compare0): Fix ambiguous uses of .md attributes.
	* config/aarch64/aarch64-simd.md
	(*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>): Likewise.
	(*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>): Likewise.
	* config/aarch64/aarch64-sve.md
	(while_ult<GPI:mode><PRED_ALL:mode>): Likewise.
	(*cond_<optab><mode>_any): Fix SVE_I/SVE_SDI typo.

From-SVN: r273433
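As an illustration (an editorial aside, not part of the commit message): the
ambiguity arises when a pattern uses two iterators at once. In the pattern
below, GPI and VDQQH are both mode iterators, so a bare <VEL>, <MODE> or <q>
could be resolved against either iterator and potentially take a different
value; the fix is to spell out the iterator with the <ITER:ATTR> form. This
is the fixed pattern produced by the first aarch64-simd.md hunk below, with
an explanatory comment added:

;; Fixed form of *aarch64_get_lane_extend, as it stands after this
;; patch.  Both GPI (the scalar destination) and VDQQH (the vector
;; source) are mode iterators, so each attribute names the iterator
;; it belongs to: VDQQH:VEL, VDQQH:MODE, VDQQH:Vetype and VDQQH:q.
(define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>"
  [(set (match_operand:GPI 0 "register_operand" "=r")
	(sign_extend:GPI
	  (vec_select:<VDQQH:VEL>
	    (match_operand:VDQQH 1 "register_operand" "w")
	    (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
  "TARGET_SIMD"
  {
    operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
					   INTVAL (operands[2]));
    return "smov\\t%<GPI:w>0, %1.<VDQQH:Vetype>[%2]";
  }
  [(set_attr "type" "neon_to_gp<VDQQH:q>")]
)

The aarch64.md and aarch64-sve.md hunks apply the same <ITER:ATTR> discipline
to patterns that combine GPI or GPF with P, GPI with SHORT, and GPI with
PRED_ALL.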
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                       | 13
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md  | 37
-rw-r--r--  gcc/config/aarch64/aarch64-sve.md   |  2
-rw-r--r--  gcc/config/aarch64/aarch64.md       | 29
4 files changed, 48 insertions, 33 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0894db7..66e715f 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,18 @@
2019-07-12 Richard Sandiford <richard.sandiford@arm.com>
+ * config/aarch64/aarch64.md (*compare_condjump<mode>)
+ (loadwb_pair<GPI:mode>_<P:mode>, loadwb_pair<GPF:mode>_<P:mode>)
+ (storewb_pair<GPI:mode>_<P:mode>, storewb_pair<GPF:mode>_<P:mode>)
+ (*ands<mode>_compare0): Fix ambiguous uses of .md attributes.
+ * config/aarch64/aarch64-simd.md
+ (*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>): Likewise.
+ (*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>): Likewise.
+ * config/aarch64/aarch64-sve.md
+ (while_ult<GPI:mode><PRED_ALL:mode>): Likewise.
+ (*cond_<optab><mode>_any): Fix SVE_I/SVE_SDI typo.
+
+2019-07-12 Richard Sandiford <richard.sandiford@arm.com>
+
* doc/md.texi: Document that @ patterns can have different
numbers of operands.
* genemit.c (handle_overloaded_gen): Handle this case.
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 0c2600f..d480e43 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3135,30 +3135,31 @@
(define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>"
[(set (match_operand:GPI 0 "register_operand" "=r")
(sign_extend:GPI
- (vec_select:<VEL>
+ (vec_select:<VDQQH:VEL>
(match_operand:VDQQH 1 "register_operand" "w")
(parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
"TARGET_SIMD"
{
- operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
+ operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
+ INTVAL (operands[2]));
return "smov\\t%<GPI:w>0, %1.<VDQQH:Vetype>[%2]";
}
- [(set_attr "type" "neon_to_gp<q>")]
-)
-
-(define_insn "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (zero_extend:GPI
- (vec_select:<VEL>
- (match_operand:VDQQH 1 "register_operand" "w")
- (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
- "TARGET_SIMD"
- {
- operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
- INTVAL (operands[2]));
- return "umov\\t%w0, %1.<Vetype>[%2]";
- }
- [(set_attr "type" "neon_to_gp<q>")]
+ [(set_attr "type" "neon_to_gp<VDQQH:q>")]
+)
+
+(define_insn "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (zero_extend:GPI
+ (vec_select:<VDQQH:VEL>
+ (match_operand:VDQQH 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_SIMD"
+ {
+ operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
+ INTVAL (operands[2]));
+ return "umov\\t%w0, %1.<VDQQH:Vetype>[%2]";
+ }
+ [(set_attr "type" "neon_to_gp<VDQQH:q>")]
)
;; Lane extraction of a value, neither sign nor zero extension
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index c4670b6..e489afb 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1363,7 +1363,7 @@
;; don't have an unnecessary PTRUE.
"&& !CONSTANT_P (operands[1])"
{
- operands[1] = CONSTM1_RTX (<MODE>mode);
+ operands[1] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
}
)
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 4d559c4..d1b2c20 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -567,14 +567,14 @@
;; sub x0, x1, #(CST & 0xfff000)
;; subs x0, x0, #(CST & 0x000fff)
;; b<ne,eq> .Label
-(define_insn_and_split "*compare_condjump<mode>"
+(define_insn_and_split "*compare_condjump<GPI:mode>"
[(set (pc) (if_then_else (EQL
(match_operand:GPI 0 "register_operand" "r")
(match_operand:GPI 1 "aarch64_imm24" "n"))
(label_ref:P (match_operand 2 "" ""))
(pc)))]
- "!aarch64_move_imm (INTVAL (operands[1]), <MODE>mode)
- && !aarch64_plus_operand (operands[1], <MODE>mode)
+ "!aarch64_move_imm (INTVAL (operands[1]), <GPI:MODE>mode)
+ && !aarch64_plus_operand (operands[1], <GPI:MODE>mode)
&& !reload_completed"
"#"
"&& true"
@@ -582,11 +582,12 @@
{
HOST_WIDE_INT lo_imm = UINTVAL (operands[1]) & 0xfff;
HOST_WIDE_INT hi_imm = UINTVAL (operands[1]) & 0xfff000;
- rtx tmp = gen_reg_rtx (<MODE>mode);
- emit_insn (gen_add<mode>3 (tmp, operands[0], GEN_INT (-hi_imm)));
- emit_insn (gen_add<mode>3_compare0 (tmp, tmp, GEN_INT (-lo_imm)));
+ rtx tmp = gen_reg_rtx (<GPI:MODE>mode);
+ emit_insn (gen_add<GPI:mode>3 (tmp, operands[0], GEN_INT (-hi_imm)));
+ emit_insn (gen_add<GPI:mode>3_compare0 (tmp, tmp, GEN_INT (-lo_imm)));
rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM);
- rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <MODE>mode, cc_reg, const0_rtx);
+ rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <GPI:MODE>mode,
+ cc_reg, const0_rtx);
emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[2]));
DONE;
}
@@ -1505,8 +1506,8 @@
(mem:GPI (plus:P (match_dup 1)
(match_operand:P 5 "const_int_operand" "n"))))])]
"INTVAL (operands[5]) == GET_MODE_SIZE (<GPI:MODE>mode)"
- "ldp\\t%<w>2, %<w>3, [%1], %4"
- [(set_attr "type" "load_<ldpstp_sz>")]
+ "ldp\\t%<GPI:w>2, %<GPI:w>3, [%1], %4"
+ [(set_attr "type" "load_<GPI:ldpstp_sz>")]
)
(define_insn "loadwb_pair<GPF:mode>_<P:mode>"
@@ -1520,7 +1521,7 @@
(mem:GPF (plus:P (match_dup 1)
(match_operand:P 5 "const_int_operand" "n"))))])]
"INTVAL (operands[5]) == GET_MODE_SIZE (<GPF:MODE>mode)"
- "ldp\\t%<w>2, %<w>3, [%1], %4"
+ "ldp\\t%<GPF:w>2, %<GPF:w>3, [%1], %4"
[(set_attr "type" "neon_load1_2reg")]
)
@@ -1553,8 +1554,8 @@
(match_operand:P 5 "const_int_operand" "n")))
(match_operand:GPI 3 "register_operand" "r"))])]
"INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
- "stp\\t%<w>2, %<w>3, [%0, %4]!"
- [(set_attr "type" "store_<ldpstp_sz>")]
+ "stp\\t%<GPI:w>2, %<GPI:w>3, [%0, %4]!"
+ [(set_attr "type" "store_<GPI:ldpstp_sz>")]
)
(define_insn "storewb_pair<GPF:mode>_<P:mode>"
@@ -1569,7 +1570,7 @@
(match_operand:P 5 "const_int_operand" "n")))
(match_operand:GPF 3 "register_operand" "w"))])]
"INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPF:MODE>mode)"
- "stp\\t%<w>2, %<w>3, [%0, %4]!"
+ "stp\\t%<GPF:w>2, %<GPF:w>3, [%0, %4]!"
[(set_attr "type" "neon_store1_2reg<q>")]
)
@@ -4782,7 +4783,7 @@
[(set_attr "type" "alus_imm")]
)
-(define_insn "*ands<mode>_compare0"
+(define_insn "*ands<GPI:mode>_compare0"
[(set (reg:CC_NZ CC_REGNUM)
(compare:CC_NZ
(zero_extend:GPI (match_operand:SHORT 1 "register_operand" "r"))