author    | Richard Earnshaw <rearnsha@arm.com>     | 2019-10-31 16:04:53 +0000
committer | Richard Earnshaw <rearnsha@gcc.gnu.org> | 2019-10-31 16:04:53 +0000
commit    | d84b9ad53fed75590c3ebc6e38bd49db3b201128
tree      | ce3b64c8ef2a81cef3e94d237f672cb1df5004d4
parent    | 946b4a68b3766ddbdb2290855c5167bd30c933a0
[arm] Pattern match insns for a + ~b + Carry
On ARM, the SBC instruction is defined as
Ra - Rb - ~C
where C is the carry flag. But -Rb = ~Rb + 1, so this is equivalent to
Ra + ~Rb + 1 - ~C
which then simplifies to
Ra + ~Rb + C
which is essentially an add-with-carry with one operand inverted. We
can define RTL patterns to match this. In Thumb-2 we can only match
when both operands are registers, but in Arm state we can also use RSC
to match when the non-inverted operand is either a constant or a
shifted register.
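Treating the carry flag C as a one-bit value (so ~C is 1 - C), the identity
Ra - Rb - ~C = Ra + ~Rb + C holds exactly in 32-bit modular arithmetic. The
following C sketch is purely illustrative (it is not part of the patch) and
checks the identity over a few sample inputs:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  /* A few representative 32-bit inputs; the identity is exact mod 2^32.  */
  uint32_t v[] = { 0u, 1u, 3u, 0x7fffffffu, 0x80000000u, 0xffffffffu };

  for (unsigned i = 0; i < 6; i++)
    for (unsigned j = 0; j < 6; j++)
      for (uint32_t c = 0; c <= 1; c++)
	{
	  uint32_t ra = v[i], rb = v[j];
	  uint32_t sbc = ra - rb - (1u - c);  /* SBC: Ra - Rb - ~C       */
	  uint32_t adc = ra + ~rb + c;        /* "ADC" with Rb inverted  */
	  assert (sbc == adc);
	}
  return 0;
}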
Overall this simplifies some cases of 64-bit arithmetic; for example,
int64_t f (int64_t a, int64_t b) { return a + ~b; }
will now compile to
MVN R2, R2
ADDS R0, R0, R2
SBC R1, R1, R3
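For reference, here is a hedged C model of that three-instruction sequence
(an illustration of how the two 32-bit halves combine, not the compiler's own
code): MVN inverts the low word of b, ADDS adds it to the low word of a and
sets the carry, and the new SBC pattern folds ~b_hi plus that carry into the
high word.

#include <assert.h>
#include <stdint.h>

/* Illustrative model of the MVN/ADDS/SBC sequence above, operating on the
   32-bit halves of a and b; not the compiler's implementation.  */
static int64_t
add_not_64 (int64_t a, int64_t b)
{
  uint32_t a_lo = (uint32_t) a;
  uint32_t a_hi = (uint32_t) ((uint64_t) a >> 32);
  uint32_t b_lo = (uint32_t) b;
  uint32_t b_hi = (uint32_t) ((uint64_t) b >> 32);

  uint32_t t  = ~b_lo;            /* MVN  R2, R2                */
  uint32_t lo = a_lo + t;         /* ADDS R0, R0, R2 (sets C)   */
  uint32_t c  = lo < a_lo;        /* carry out of the ADDS      */
  uint32_t hi = a_hi + ~b_hi + c; /* SBC  R1, R1, R3            */

  return (int64_t) (((uint64_t) hi << 32) | lo);
}

int main (void)
{
  assert (add_not_64 (5, 3) == 5 + ~(int64_t) 3);
  assert (add_not_64 (-7, 0x123456789abcLL) == -7 + ~0x123456789abcLL);
  return 0;
}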
* config/arm/arm.md (add_not_cin): New insn.
(add_not_shift_cin): Likewise.
From-SVN: r277676
-rw-r--r-- | gcc/ChangeLog         |  5
-rw-r--r-- | gcc/config/arm/arm.md | 35
2 files changed, 40 insertions, 0 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5ce04fe..e520cf0 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2019-10-31  Richard Earnshaw  <rearnsha@arm.com>
+
+	* config/arm/arm.md (add_not_cin): New insn.
+	(add_not_shift_cin): Likewise.
+
 2019-10-31  Martin Liska  <mliska@suse.cz>
 
 	* ipa-icf-gimple.c (func_checker::compare_tree_ssa_label): Remove.
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index ae77cc3..4f035cb 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -1662,6 +1662,41 @@
    (set_attr "type" "adc_imm")]
 )
 
+;; SBC performs Rn - Rm - ~C, but -Rm = ~Rm + 1 => Rn + ~Rm + 1 - ~C
+;; => Rn + ~Rm + C, which is essentially ADC Rd, Rn, ~Rm
+(define_insn "*add_not_cin"
+  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+	(plus:SI
+	 (plus:SI (not:SI (match_operand:SI 1 "s_register_operand" "r,r"))
+		  (match_operand:SI 3 "arm_carry_operation" ""))
+	 (match_operand:SI 2 "arm_rhs_operand" "r,I")))]
+  "TARGET_ARM || (TARGET_THUMB2 && !CONST_INT_P (operands[2]))"
+  "@
+   sbc%?\\t%0, %2, %1
+   rsc%?\\t%0, %1, %2"
+  [(set_attr "conds" "use")
+   (set_attr "predicable" "yes")
+   (set_attr "arch" "*,a")
+   (set_attr "type" "adc_reg,adc_imm")]
+)
+
+;; On Arm we can also use the same trick when the non-inverted operand is
+;; shifted, using RSC.
+(define_insn "add_not_shift_cin"
+  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+	(plus:SI
+	 (plus:SI (match_operator:SI 3 "shift_operator"
+		   [(match_operand:SI 1 "s_register_operand" "r,r")
+		    (match_operand:SI 2 "shift_amount_operand" "M,r")])
+		  (not:SI (match_operand:SI 4 "s_register_operand" "r,r")))
+	 (match_operand:SI 5 "arm_carry_operation" "")))]
+  "TARGET_ARM"
+  "rsc%?\\t%0, %4, %1%S3"
+  [(set_attr "conds" "use")
+   (set_attr "predicable" "yes")
+   (set_attr "type" "alu_shift_imm,alu_shift_reg")]
+)
+
 (define_insn "cmpsi3_carryin_<CC_EXTEND>out"
   [(set (reg:<CC_EXTEND> CC_REGNUM)
 	(compare:<CC_EXTEND>