From 385eb93d42f33bf62a78e57a925a05d4d9715b80 Mon Sep 17 00:00:00 2001
From: James Greenhalgh <james.greenhalgh@arm.com>
Date: Tue, 8 Jan 2013 14:57:33 +0000
Subject: [AARCH64] Add support for floating-point vcond.

gcc/
	* config/aarch64/aarch64-simd.md
	(aarch64_simd_bsl<mode>_internal): Add floating-point modes.
	(aarch64_simd_bsl<mode>): Likewise.
	(aarch64_vcond_internal<mode>): Likewise.
	(vcond<mode><mode>): Likewise.
	(aarch64_cm<cmp><mode>): Fix constraints, add new modes.
	* config/aarch64/iterators.md (V_cmp_result): Add V2DF.

gcc/testsuite/
	* gcc/testsuite/gcc.target/aarch64/vect-fcm-eq-d.c: New.
	* gcc/testsuite/gcc.target/aarch64/vect-fcm-eq-f.c: Likewise.
	* gcc/testsuite/gcc.target/aarch64/vect-fcm-ge-d.c: Likewise.
	* gcc/testsuite/gcc.target/aarch64/vect-fcm-ge-f.c: Likewise.
	* gcc/testsuite/gcc.target/aarch64/vect-fcm-gt-d.c: Likewise.
	* gcc/testsuite/gcc.target/aarch64/vect-fcm-gt-f.c: Likewise.
	* gcc/testsuite/gcc.target/aarch64/vect-fcm.x: Likewise.
	* gcc/testsuite/lib/target-supports.exp
	(check_effective_target_vect_cond): Enable for AArch64.

From-SVN: r195018
---
 gcc/config/aarch64/aarch64-simd.md | 103 ++++++++++++++++++++++++++++++-------
 1 file changed, 85 insertions(+), 18 deletions(-)

(limited to 'gcc/config')

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index d4b52c3..e6655e8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1463,7 +1463,7 @@
    (set_attr "simd_mode" "V2SI")]
 )
 
-;; vbsl_* intrinsics may compile to any of vbsl/vbif/vbit depending on register
+;; vbsl_* intrinsics may compile to any of bsl/bif/bit depending on register
 ;; allocation. For an intrinsic of form:
 ;;   vD = bsl_* (vS, vN, vM)
 ;; We can use any of:
@@ -1472,11 +1472,12 @@
 ;;   bif vD, vM, vS  (if D = N, so 0-bits in vS choose bits from vM, else vN)
 
 (define_insn "aarch64_simd_bsl<mode>_internal"
-  [(set (match_operand:VDQ 0 "register_operand"		"=w,w,w")
-	(unspec:VDQ [(match_operand:VDQ 1 "register_operand"	" 0,w,w")
-		     (match_operand:VDQ 2 "register_operand"	" w,w,0")
-		     (match_operand:VDQ 3 "register_operand"	" w,0,w")]
-		    UNSPEC_BSL))]
+  [(set (match_operand:VALL 0 "register_operand"		"=w,w,w")
+	(unspec:VALL
+	 [(match_operand:<V_cmp_result> 1 "register_operand"	" 0,w,w")
+	  (match_operand:VALL 2 "register_operand"		" w,w,0")
+	  (match_operand:VALL 3 "register_operand"		" w,0,w")]
+	 UNSPEC_BSL))]
   "TARGET_SIMD"
   "@
   bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
@@ -1485,15 +1486,15 @@
 )
 
 (define_expand "aarch64_simd_bsl<mode>"
-  [(set (match_operand:VDQ 0 "register_operand")
-	(unspec:VDQ [(match_operand:<V_cmp_result> 1 "register_operand")
-		     (match_operand:VDQ 2 "register_operand")
-		     (match_operand:VDQ 3 "register_operand")]
-		    UNSPEC_BSL))]
+  [(set (match_operand:VALL 0 "register_operand")
+	(unspec:VALL [(match_operand:<V_cmp_result> 1 "register_operand")
+		      (match_operand:VALL 2 "register_operand")
+		      (match_operand:VALL 3 "register_operand")]
+		     UNSPEC_BSL))]
   "TARGET_SIMD"
 {
   /* We can't alias operands together if they have different modes.  */
-  operands[1] = gen_lowpart (<MODE>mode, operands[1]);
+  operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
 })
 
 (define_expand "aarch64_vcond_internal<mode>"
@@ -1574,14 +1575,64 @@
   DONE;
 })
 
+(define_expand "aarch64_vcond_internal<mode>"
+  [(set (match_operand:VDQF 0 "register_operand")
+	(if_then_else:VDQF
+	  (match_operator 3 "comparison_operator"
+	    [(match_operand:VDQF 4 "register_operand")
+	     (match_operand:VDQF 5 "nonmemory_operand")])
+	  (match_operand:VDQF 1 "register_operand")
+	  (match_operand:VDQF 2 "register_operand")))]
+  "TARGET_SIMD"
+{
+  int inverse = 0;
+  rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+
+  if (!REG_P (operands[5])
+      && (operands[5] != CONST0_RTX (<MODE>mode)))
+    operands[5] = force_reg (<MODE>mode, operands[5]);
+
+  switch (GET_CODE (operands[3]))
+    {
+    case LT:
+      inverse = 1;
+      /* Fall through.  */
+    case GE:
+      emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
+      break;
+    case LE:
+      inverse = 1;
+      /* Fall through.  */
+    case GT:
+      emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
+      break;
+    case NE:
+      inverse = 1;
+      /* Fall through.  */
+    case EQ:
+      emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  if (inverse)
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
+					   operands[1]));
+  else
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
+					   operands[2]));
+  DONE;
+})
+
 (define_expand "vcond<mode><mode>"
-  [(set (match_operand:VDQ 0 "register_operand")
-	(if_then_else:VDQ
+  [(set (match_operand:VALL 0 "register_operand")
+	(if_then_else:VALL
 	  (match_operator 3 "comparison_operator"
-	    [(match_operand:VDQ 4 "register_operand")
-	     (match_operand:VDQ 5 "nonmemory_operand")])
-	  (match_operand:VDQ 1 "register_operand")
-	  (match_operand:VDQ 2 "register_operand")))]
+	    [(match_operand:VALL 4 "register_operand")
+	     (match_operand:VALL 5 "nonmemory_operand")])
+	  (match_operand:VALL 1 "register_operand")
+	  (match_operand:VALL 2 "register_operand")))]
   "TARGET_SIMD"
 {
   emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
@@ -2866,6 +2917,22 @@
    (set_attr "simd_mode" "<MODE>")]
 )
 
+;; fcm(eq|ge|le|lt|gt)
+
+(define_insn "aarch64_cm<cmp><mode>"
+  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
+	(unspec:<V_cmp_result>
+	  [(match_operand:VDQF 1 "register_operand" "w,w")
+	   (match_operand:VDQF 2 "aarch64_simd_reg_or_zero" "w,Dz")]
+	  VCMP_S))]
+  "TARGET_SIMD"
+  "@
+  fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
+  fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
+  [(set_attr "simd_type" "simd_fcmp")
+   (set_attr "simd_mode" "<MODE>")]
+)
+
 ;; addp
 
 (define_insn "aarch64_addp<mode>"
-- 
cgit v1.1
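
For context, and not part of the patch itself: the new vect-fcm-*.c tests referenced in the ChangeLog exercise loops of roughly the shape sketched below. The sketch is illustrative only (the function name, array size, and constants are made up here, not taken from the testsuite); it shows the kind of floating-point conditional that the vcond/fcm/bsl patterns above let the vectorizer handle.

```c
/* Illustrative sketch only -- not the actual vect-fcm-gt-f.c source.
   Build with something like: gcc -O2 -ftree-vectorize on an aarch64 target.  */
#define N 1024

float a[N], b[N], out[N];

void
select_gt (void)
{
  /* The ternary below is if-converted into a vector compare (fcmgt)
     producing an all-ones/all-zeros mask, followed by a bitwise
     select (bsl) between the two constant vectors.  */
  for (int i = 0; i < N; i++)
    out[i] = (a[i] > b[i]) ? 4.0f : 2.0f;
}
```

Before this change the vcond expander only covered the integer VDQ modes, so a loop like this on float or double data could not be vectorized this way on AArch64.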