author     James Greenhalgh <james.greenhalgh@arm.com>   2013-01-08 14:57:33 +0000
committer  James Greenhalgh <jgreenhalgh@gcc.gnu.org>    2013-01-08 14:57:33 +0000
commit     385eb93d42f33bf62a78e57a925a05d4d9715b80
tree       d449449a11ce24c9039195c95b20d0a43b6872f9 /gcc/config
parent     4dcd1054bf677f8022fd5c92704e46597018396c
[AARCH64] Add support for floating-point vcond.
gcc/
* config/aarch64/aarch64-simd.md
(aarch64_simd_bsl<mode>_internal): Add floating-point modes.
(aarch64_simd_bsl): Likewise.
(aarch64_vcond_internal<mode>): Likewise.
(vcond<mode><mode>): Likewise.
(aarch64_cm<cmp><mode>): Fix constraints, add new modes.
* config/aarch64/iterators.md (V_cmp_result): Add V2DF.
gcc/testsuite/
* gcc.target/aarch64/vect-fcm-eq-d.c: New.
* gcc.target/aarch64/vect-fcm-eq-f.c: Likewise.
* gcc.target/aarch64/vect-fcm-ge-d.c: Likewise.
* gcc.target/aarch64/vect-fcm-ge-f.c: Likewise.
* gcc.target/aarch64/vect-fcm-gt-d.c: Likewise.
* gcc.target/aarch64/vect-fcm-gt-f.c: Likewise.
* gcc.target/aarch64/vect-fcm.x: Likewise.
* lib/target-supports.exp
(check_effective_target_vect_cond): Enable for AArch64.
From-SVN: r195018
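
The net effect: a floating-point conditional select such as (a > b) ? x : y
now expands to a fcm* compare, which writes an all-ones or all-zeros mask per
lane, followed by a bsl to pick between the two arms.  A minimal sketch of the
kind of loop this lets the vectorizer handle, in the spirit of the new
vect-fcm-*.c tests (the function and names below are illustrative, not the
actual test sources; compile for aarch64 with -O2 -ftree-vectorize):

/* Each iteration is independent, so the loop can vectorize to a
   fcmgt mask followed by a bsl select per vector of lanes.  */
void
fcm_select (float *out, const float *a, const float *b,
            const float *x, const float *y, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = (a[i] > b[i]) ? x[i] : y[i];
}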
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md  103
1 file changed, 85 insertions, 18 deletions
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index d4b52c3..e6655e8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1463,7 +1463,7 @@
    (set_attr "simd_mode" "V2SI")]
 )
 
-;; vbsl_* intrinsics may compile to any of vbsl/vbif/vbit depending on register
+;; vbsl_* intrinsics may compile to any of bsl/bif/bit depending on register
 ;; allocation.  For an intrinsic of form:
 ;;   vD = bsl_* (vS, vN, vM)
 ;; We can use any of:
@@ -1472,11 +1472,12 @@
 ;;   bif vD, vM, vS  (if D = N, so 0-bits in vS choose bits from vM, else vN)
 
 (define_insn "aarch64_simd_bsl<mode>_internal"
-  [(set (match_operand:VDQ 0 "register_operand"          "=w,w,w")
-        (unspec:VDQ [(match_operand:VDQ 1 "register_operand" " 0,w,w")
-                     (match_operand:VDQ 2 "register_operand" " w,w,0")
-                     (match_operand:VDQ 3 "register_operand" " w,0,w")]
-                    UNSPEC_BSL))]
+  [(set (match_operand:VALL 0 "register_operand"          "=w,w,w")
+        (unspec:VALL
+         [(match_operand:<V_cmp_result> 1 "register_operand" " 0,w,w")
+          (match_operand:VALL 2 "register_operand" " w,w,0")
+          (match_operand:VALL 3 "register_operand" " w,0,w")]
+         UNSPEC_BSL))]
   "TARGET_SIMD"
   "@
   bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
@@ -1485,15 +1486,15 @@
 )
 
 (define_expand "aarch64_simd_bsl<mode>"
-  [(set (match_operand:VDQ 0 "register_operand")
-        (unspec:VDQ [(match_operand:<V_cmp_result> 1 "register_operand")
-                     (match_operand:VDQ 2 "register_operand")
-                     (match_operand:VDQ 3 "register_operand")]
-                    UNSPEC_BSL))]
+  [(set (match_operand:VALL 0 "register_operand")
+        (unspec:VALL [(match_operand:<V_cmp_result> 1 "register_operand")
+                      (match_operand:VALL 2 "register_operand")
+                      (match_operand:VALL 3 "register_operand")]
+                     UNSPEC_BSL))]
   "TARGET_SIMD"
 {
   /* We can't alias operands together if they have different modes.  */
-  operands[1] = gen_lowpart (<MODE>mode, operands[1]);
+  operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
 })
 
 (define_expand "aarch64_vcond_internal<mode>"
@@ -1574,14 +1575,64 @@
   DONE;
 })
 
+(define_expand "aarch64_vcond_internal<mode>"
+  [(set (match_operand:VDQF 0 "register_operand")
+        (if_then_else:VDQF
+          (match_operator 3 "comparison_operator"
+            [(match_operand:VDQF 4 "register_operand")
+             (match_operand:VDQF 5 "nonmemory_operand")])
+          (match_operand:VDQF 1 "register_operand")
+          (match_operand:VDQF 2 "register_operand")))]
+  "TARGET_SIMD"
+{
+  int inverse = 0;
+  rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+
+  if (!REG_P (operands[5])
+      && (operands[5] != CONST0_RTX (<MODE>mode)))
+    operands[5] = force_reg (<MODE>mode, operands[5]);
+
+  switch (GET_CODE (operands[3]))
+    {
+    case LT:
+      inverse = 1;
+      /* Fall through.  */
+    case GE:
+      emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
+      break;
+    case LE:
+      inverse = 1;
+      /* Fall through.  */
+    case GT:
+      emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
+      break;
+    case NE:
+      inverse = 1;
+      /* Fall through.  */
+    case EQ:
+      emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  if (inverse)
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
+                                           operands[1]));
+  else
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
+                                           operands[2]));
+  DONE;
+})
+
 (define_expand "vcond<mode><mode>"
-  [(set (match_operand:VDQ 0 "register_operand")
-        (if_then_else:VDQ
+  [(set (match_operand:VALL 0 "register_operand")
+        (if_then_else:VALL
           (match_operator 3 "comparison_operator"
-            [(match_operand:VDQ 4 "register_operand")
-             (match_operand:VDQ 5 "nonmemory_operand")])
-          (match_operand:VDQ 1 "register_operand")
-          (match_operand:VDQ 2 "register_operand")))]
+            [(match_operand:VALL 4 "register_operand")
+             (match_operand:VALL 5 "nonmemory_operand")])
+          (match_operand:VALL 1 "register_operand")
+          (match_operand:VALL 2 "register_operand")))]
   "TARGET_SIMD"
 {
   emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
@@ -2866,6 +2917,22 @@
    (set_attr "simd_mode" "<MODE>")]
 )
 
+;; fcm(eq|ge|le|lt|gt)
+
+(define_insn "aarch64_cm<cmp><mode>"
+  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
+        (unspec:<V_cmp_result>
+          [(match_operand:VDQF 1 "register_operand" "w,w")
+           (match_operand:VDQF 2 "aarch64_simd_reg_or_zero" "w,Dz")]
+          VCMP_S))]
+  "TARGET_SIMD"
+  "@
+  fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
+  fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
+  [(set_attr "simd_type" "simd_fcmp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
 ;; addp
 
 (define_insn "aarch64_addp<mode>"
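
For reference, only fcmeq/fcmge/fcmgt patterns exist, so the expander above
synthesises LT, LE and NE by emitting the inverse comparison and swapping the
bsl arms.  A scalar model of that trick, assuming (as fcm* guarantees) an
all-ones/all-zeros mask, and ignoring NaN inputs, for which a comparison and
its inverse are not complements:

#include <stdint.h>

/* bsl computes (mask & n) | (~mask & m): 1-bits in the mask choose
   bits from n, 0-bits choose bits from m.  */
static uint32_t
bsl (uint32_t mask, uint32_t n, uint32_t m)
{
  return (mask & n) | (~mask & m);
}

/* Model of (a < b) ? then_bits : else_bits.  No LT compare exists,
   so build the GE mask instead...  */
uint32_t
vcond_lt (float a, float b, uint32_t then_bits, uint32_t else_bits)
{
  uint32_t mask = (a >= b) ? UINT32_MAX : 0;
  /* ...and swap the select arms, matching the 'inverse' path in
     aarch64_vcond_internal.  */
  return bsl (mask, else_bits, then_bits);
}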