;; Machine description for AArch64 SVE.
;; Copyright (C) 2009-2019 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; The file is organised into the following sections (search for the full
;; line):
;;
;; == General notes
;; ---- Note on the handling of big-endian SVE
;;
;; == Moves
;; ---- Moves of single vectors
;; ---- Moves of multiple vectors
;; ---- Moves of predicates
;;
;; == Loads
;; ---- Normal contiguous loads
;; ---- Normal gather loads
;;
;; == Stores
;; ---- Normal contiguous stores
;; ---- Normal scatter stores
;;
;; == Vector creation
;; ---- [INT,FP] Duplicate element
;; ---- [INT,FP] Initialize from individual elements
;; ---- [INT] Linear series
;; ---- [PRED] Duplicate element
;;
;; == Vector decomposition
;; ---- [INT,FP] Extract index
;; ---- [INT,FP] Extract active element
;; ---- [PRED] Extract index
;;
;; == Unary arithmetic
;; ---- [INT] General unary arithmetic corresponding to rtx codes
;; ---- [FP] General unary arithmetic corresponding to unspecs
;; ---- [PRED] Inverse
;;
;; == Binary arithmetic
;; ---- [INT] General binary arithmetic corresponding to rtx codes
;; ---- [INT] Addition
;; ---- [INT] Subtraction
;; ---- [INT] Absolute difference
;; ---- [INT] Multiplication
;; ---- [INT] Highpart multiplication
;; ---- [INT] Division
;; ---- [INT] Binary logical operations
;; ---- [INT] Binary logical operations (inverted second input)
;; ---- [INT] Shifts
;; ---- [INT] Maximum and minimum
;; ---- [FP] General binary arithmetic corresponding to rtx codes
;; ---- [FP] General binary arithmetic corresponding to unspecs
;; ---- [FP] Addition
;; ---- [FP] Subtraction
;; ---- [FP] Absolute difference
;; ---- [FP] Multiplication
;; ---- [FP] Division
;; ---- [FP] Binary logical operations
;; ---- [FP] Sign copying
;; ---- [FP] Maximum and minimum
;; ---- [PRED] Binary logical operations
;; ---- [PRED] Binary logical operations (inverted second input)
;; ---- [PRED] Binary logical operations (inverted result)
;;
;; == Ternary arithmetic
;; ---- [INT] MLA and MAD
;; ---- [INT] MLS and MSB
;; ---- [INT] Dot product
;; ---- [INT] Sum of absolute differences
;; ---- [FP] General ternary arithmetic corresponding to unspecs
;;
;; == Comparisons and selects
;; ---- [INT,FP] Select based on predicates
;; ---- [INT,FP] Compare and select
;; ---- [INT] Comparisons
;; ---- [INT] While tests
;; ---- [FP] Comparisons
;; ---- [PRED] Test bits
;;
;; == Reductions
;; ---- [INT,FP] Conditional reductions
;; ---- [INT] Tree reductions
;; ---- [FP] Tree reductions
;; ---- [FP] Left-to-right reductions
;;
;; == Permutes
;; ---- [INT,FP] General permutes
;; ---- [INT,FP] Special-purpose unary permutes
;; ---- [INT,FP] Special-purpose binary permutes
;; ---- [PRED] Special-purpose binary permutes
;;
;; == Conversions
;; ---- [INT<-INT] Packs
;; ---- [INT<-INT] Unpacks
;; ---- [INT<-FP] Conversions
;; ---- [INT<-FP] Packs
;; ---- [INT<-FP] Unpacks
;; ---- [FP<-INT] Conversions
;; ---- [FP<-INT] Packs
;; ---- [FP<-INT] Unpacks
;; ---- [FP<-FP] Packs
;; ---- [FP<-FP] Unpacks
;; ---- [PRED<-PRED] Packs
;; ---- [PRED<-PRED] Unpacks

;; =========================================================================
;; == General notes
;; =========================================================================
;;
;; -------------------------------------------------------------------------
;; ---- Note on the handling of big-endian SVE
;; -------------------------------------------------------------------------
;;
;; On big-endian systems, Advanced SIMD mov<mode> patterns act in the
;; same way as movdi or movti would: the first byte of memory goes
;; into the most significant byte of the register and the last byte
;; of memory goes into the least significant byte of the register.
;; This is the most natural ordering for Advanced SIMD and matches
;; the ABI layout for 64-bit and 128-bit vector types.
;;
;; As a result, the order of bytes within the register is what GCC
;; expects for a big-endian target, and subreg offsets therefore work
;; as expected, with the first element in memory having subreg offset 0
;; and the last element in memory having the subreg offset associated
;; with a big-endian lowpart.  However, this ordering also means that
;; GCC's lane numbering does not match the architecture's numbering:
;; GCC always treats the element at the lowest address in memory
;; (subreg offset 0) as element 0, while the architecture treats
;; the least significant end of the register as element 0.
;;
;; The situation for SVE is different.  We want the layout of the
;; SVE register to be the same for mov<mode> as it is for maskload<mode>:
;; logically, a mov<mode> load must be indistinguishable from a
;; maskload<mode> whose mask is all true.  We therefore need the
;; register layout to match LD1 rather than LDR.  The ABI layout of
;; SVE types also matches LD1 byte ordering rather than LDR byte ordering.
;;
;; As a result, the architecture lane numbering matches GCC's lane
;; numbering, with element 0 always being the first in memory.
;; However:
;;
;; - Applying a subreg offset to a register does not give the element
;;   that GCC expects: the first element in memory has the subreg offset
;;   associated with a big-endian lowpart while the last element in memory
;;   has subreg offset 0.  We handle this via TARGET_CAN_CHANGE_MODE_CLASS.
;;
;; - We cannot use LDR and STR for spill slots that might be accessed
;;   via subregs, since although the elements have the order GCC expects,
;;   the order of the bytes within the elements is different.  We instead
;;   access spill slots via LD1 and ST1, using secondary reloads to
;;   reserve a predicate register.

;; =========================================================================
;; == Moves
;; =========================================================================

;; -------------------------------------------------------------------------
;; ---- Moves of single vectors
;; -------------------------------------------------------------------------
;; Includes:
;; - MOV  (including aliases)
;; - LD1B (contiguous form)
;; - LD1D (    "    "   )
;; - LD1H (    "    "   )
;; - LD1W (    "    "   )
;; - LDR
;; - ST1B (contiguous form)
;; - ST1D (    "    "   )
;; - ST1H (    "    "   )
;; - ST1W (    "    "   )
;; - STR
;; -------------------------------------------------------------------------

(define_expand "mov<mode>"
  [(set (match_operand:SVE_ALL 0 "nonimmediate_operand")
	(match_operand:SVE_ALL 1 "general_operand"))]
  "TARGET_SVE"
  {
    /* Use the predicated load and store patterns where possible.
       This is required for big-endian targets (see the comment at the
       head of the file) and increases the addressing choices for
       little-endian.  */
    if ((MEM_P (operands[0]) || MEM_P (operands[1]))
	&& can_create_pseudo_p ())
      {
	aarch64_expand_sve_mem_move (operands[0], operands[1], <VPRED>mode);
	DONE;
      }

    if (CONSTANT_P (operands[1]))
      {
	aarch64_expand_mov_immediate (operands[0], operands[1],
				      gen_vec_duplicate<mode>);
	DONE;
      }

    /* Optimize subregs on big-endian targets: we can use REV[BHW]
       instead of going through memory.  */
    if (BYTES_BIG_ENDIAN
	&& aarch64_maybe_expand_sve_subreg_move (operands[0], operands[1]))
      DONE;
  }
)

(define_expand "movmisalign<mode>"
  [(set (match_operand:SVE_ALL 0 "nonimmediate_operand")
	(match_operand:SVE_ALL 1 "general_operand"))]
  "TARGET_SVE"
  {
    /* Equivalent to a normal move for our purposes.  */
    emit_move_insn (operands[0], operands[1]);
    DONE;
  }
)

;; Unpredicated moves (little-endian).  Only allow memory operations
;; during and after RA; before RA we want the predicated load and
;; store patterns to be used instead.
(define_insn "*aarch64_sve_mov<mode>_le"
  [(set (match_operand:SVE_ALL 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w")
	(match_operand:SVE_ALL 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))]
  "TARGET_SVE
   && !BYTES_BIG_ENDIAN
   && ((lra_in_progress || reload_completed)
       || (register_operand (operands[0], <MODE>mode)
	   && nonmemory_operand (operands[1], <MODE>mode)))"
  "@
   ldr\t%0, %1
   str\t%1, %0
   mov\t%0.d, %1.d
   * return aarch64_output_sve_mov_immediate (operands[1]);"
)

;; Unpredicated moves (big-endian).  Memory accesses require secondary
;; reloads.
(define_insn "*aarch64_sve_mov<mode>_be"
  [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w")
	(match_operand:SVE_ALL 1 "aarch64_nonmemory_operand" "w, Dn"))]
  "TARGET_SVE && BYTES_BIG_ENDIAN"
  "@
   mov\t%0.d, %1.d
   * return aarch64_output_sve_mov_immediate (operands[1]);"
)

;; Handle big-endian memory reloads.  We use byte PTRUE for all modes
;; to try to encourage reuse.
;; This pattern needs constraints due to TARGET_SECONDARY_RELOAD hook.
(define_expand "aarch64_sve_reload_be"
  [(parallel
     [(set (match_operand 0)
	   (match_operand 1))
      (clobber (match_operand:VNx16BI 2 "register_operand" "=Upl"))])]
  "TARGET_SVE && BYTES_BIG_ENDIAN"
  {
    /* Create a PTRUE.  */
    emit_move_insn (operands[2], CONSTM1_RTX (VNx16BImode));

    /* Refer to the PTRUE in the appropriate mode for this move.  */
    machine_mode mode = GET_MODE (operands[0]);
    machine_mode pred_mode
      = aarch64_sve_pred_mode (GET_MODE_UNIT_SIZE (mode)).require ();
    rtx pred = gen_lowpart (pred_mode, operands[2]);

    /* Emit a predicated load or store.  */
    aarch64_emit_sve_pred_move (operands[0], pred, operands[1]);
    DONE;
  }
)

;; A predicated move in which the predicate is known to be all-true.
;; Note that this pattern is generated directly by aarch64_emit_sve_pred_move,
;; so changes to this pattern will need changes there as well.
(define_insn_and_split "@aarch64_pred_mov<mode>"
  [(set (match_operand:SVE_ALL 0 "nonimmediate_operand" "=w, w, m")
	(unspec:SVE_ALL
	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
	   (match_operand:SVE_ALL 2 "nonimmediate_operand" "w, m, w")]
	  UNSPEC_MERGE_PTRUE))]
  "TARGET_SVE
   && (register_operand (operands[0], <MODE>mode)
       || register_operand (operands[2], <MODE>mode))"
  "@
   #
   ld1<Vesize>\t%0.<Vetype>, %1/z, %2
   st1<Vesize>\t%2.<Vetype>, %1, %0"
  "&& register_operand (operands[0], <MODE>mode)
   && register_operand (operands[2], <MODE>mode)"
  [(set (match_dup 0) (match_dup 2))]
)

;; A pattern for optimizing SUBREGs that have a reinterpreting effect
;; on big-endian targets; see aarch64_maybe_expand_sve_subreg_move
;; for details.
We use a special predicate for operand 2 to reduce ;; the number of patterns. (define_insn_and_split "*aarch64_sve_mov_subreg_be" [(set (match_operand:SVE_ALL 0 "aarch64_sve_nonimmediate_operand" "=w") (unspec:SVE_ALL [(match_operand:VNx16BI 1 "register_operand" "Upl") (match_operand 2 "aarch64_any_register_operand" "w")] UNSPEC_REV_SUBREG))] "TARGET_SVE && BYTES_BIG_ENDIAN" "#" "&& reload_completed" [(const_int 0)] { aarch64_split_sve_subreg_move (operands[0], operands[1], operands[2]); DONE; } ) ;; ------------------------------------------------------------------------- ;; ---- Moves of multiple vectors ;; ------------------------------------------------------------------------- ;; All patterns in this section are synthetic and split to real ;; instructions after reload. ;; ------------------------------------------------------------------------- (define_expand "mov" [(set (match_operand:SVE_STRUCT 0 "nonimmediate_operand") (match_operand:SVE_STRUCT 1 "general_operand"))] "TARGET_SVE" { /* Big-endian loads and stores need to be done via LD1 and ST1; see the comment at the head of the file for details. */ if ((MEM_P (operands[0]) || MEM_P (operands[1])) && BYTES_BIG_ENDIAN) { gcc_assert (can_create_pseudo_p ()); aarch64_expand_sve_mem_move (operands[0], operands[1], mode); DONE; } if (CONSTANT_P (operands[1])) { aarch64_expand_mov_immediate (operands[0], operands[1]); DONE; } } ) ;; Unpredicated structure moves (little-endian). (define_insn "*aarch64_sve_mov_le" [(set (match_operand:SVE_STRUCT 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w") (match_operand:SVE_STRUCT 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))] "TARGET_SVE && !BYTES_BIG_ENDIAN" "#" [(set_attr "length" "")] ) ;; Unpredicated structure moves (big-endian). Memory accesses require ;; secondary reloads. (define_insn "*aarch64_sve_mov_be" [(set (match_operand:SVE_STRUCT 0 "register_operand" "=w, w") (match_operand:SVE_STRUCT 1 "aarch64_nonmemory_operand" "w, Dn"))] "TARGET_SVE && BYTES_BIG_ENDIAN" "#" [(set_attr "length" "")] ) ;; Split unpredicated structure moves into pieces. This is the same ;; for both big-endian and little-endian code, although it only needs ;; to handle memory operands for little-endian code. (define_split [(set (match_operand:SVE_STRUCT 0 "aarch64_sve_nonimmediate_operand") (match_operand:SVE_STRUCT 1 "aarch64_sve_general_operand"))] "TARGET_SVE && reload_completed" [(const_int 0)] { rtx dest = operands[0]; rtx src = operands[1]; if (REG_P (dest) && REG_P (src)) aarch64_simd_emit_reg_reg_move (operands, mode, ); else for (unsigned int i = 0; i < ; ++i) { rtx subdest = simplify_gen_subreg (mode, dest, mode, i * BYTES_PER_SVE_VECTOR); rtx subsrc = simplify_gen_subreg (mode, src, mode, i * BYTES_PER_SVE_VECTOR); emit_insn (gen_rtx_SET (subdest, subsrc)); } DONE; } ) ;; Predicated structure moves. This works for both endiannesses but in ;; practice is only useful for big-endian. 
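;; As an illustrative sketch (hypothetical register allocation, not
;; actual compiler output), a predicated move of a two-vector structure
;; of 32-bit elements is expected to split into one LD1/ST1 per
;; constituent vector, e.g. for a spill to the stack:
;;
;;	st1w	z4.s, p0, [sp]
;;	st1w	z5.s, p0, [sp, #1, mul vl]
;;
;; so that the in-register byte order matches the LD1/ST1 layout
;; described at the head of the file.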
(define_insn_and_split "@aarch64_pred_mov" [(set (match_operand:SVE_STRUCT 0 "aarch64_sve_struct_nonimmediate_operand" "=w, w, Utx") (unspec:SVE_STRUCT [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (match_operand:SVE_STRUCT 2 "aarch64_sve_struct_nonimmediate_operand" "w, Utx, w")] UNSPEC_MERGE_PTRUE))] "TARGET_SVE && (register_operand (operands[0], mode) || register_operand (operands[2], mode))" "#" "&& reload_completed" [(const_int 0)] { for (unsigned int i = 0; i < ; ++i) { rtx subdest = simplify_gen_subreg (mode, operands[0], mode, i * BYTES_PER_SVE_VECTOR); rtx subsrc = simplify_gen_subreg (mode, operands[2], mode, i * BYTES_PER_SVE_VECTOR); aarch64_emit_sve_pred_move (subdest, operands[1], subsrc); } DONE; } [(set_attr "length" "")] ) ;; ------------------------------------------------------------------------- ;; ---- Moves of predicates ;; ------------------------------------------------------------------------- ;; Includes: ;; - MOV ;; - LDR ;; - PFALSE ;; - PTRUE ;; - STR ;; ------------------------------------------------------------------------- (define_expand "mov" [(set (match_operand:PRED_ALL 0 "nonimmediate_operand") (match_operand:PRED_ALL 1 "general_operand"))] "TARGET_SVE" { if (GET_CODE (operands[0]) == MEM) operands[1] = force_reg (mode, operands[1]); } ) (define_insn "*aarch64_sve_mov" [(set (match_operand:PRED_ALL 0 "nonimmediate_operand" "=Upa, m, Upa, Upa, Upa") (match_operand:PRED_ALL 1 "general_operand" "Upa, Upa, m, Dz, Dm"))] "TARGET_SVE && (register_operand (operands[0], mode) || register_operand (operands[1], mode))" "@ mov\t%0.b, %1.b str\t%1, %0 ldr\t%0, %1 pfalse\t%0.b * return aarch64_output_ptrue (mode, '');" ) ;; ========================================================================= ;; == Loads ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- Normal contiguous loads ;; ------------------------------------------------------------------------- ;; Includes contiguous forms of: ;; - LD1B ;; - LD1D ;; - LD1H ;; - LD1W ;; - LD2B ;; - LD2D ;; - LD2H ;; - LD2W ;; - LD3B ;; - LD3D ;; - LD3H ;; - LD3W ;; - LD4B ;; - LD4D ;; - LD4H ;; - LD4W ;; ------------------------------------------------------------------------- ;; Predicated LD1. (define_insn "maskload" [(set (match_operand:SVE_ALL 0 "register_operand" "=w") (unspec:SVE_ALL [(match_operand: 2 "register_operand" "Upl") (match_operand:SVE_ALL 1 "memory_operand" "m")] UNSPEC_LD1_SVE))] "TARGET_SVE" "ld1\t%0., %2/z, %1" ) ;; Unpredicated LD[234]. (define_expand "vec_load_lanes" [(set (match_operand:SVE_STRUCT 0 "register_operand") (unspec:SVE_STRUCT [(match_dup 2) (match_operand:SVE_STRUCT 1 "memory_operand")] UNSPEC_LDN))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Predicated LD[234]. (define_insn "vec_mask_load_lanes" [(set (match_operand:SVE_STRUCT 0 "register_operand" "=w") (unspec:SVE_STRUCT [(match_operand: 2 "register_operand" "Upl") (match_operand:SVE_STRUCT 1 "memory_operand" "m")] UNSPEC_LDN))] "TARGET_SVE" "ld\t%0, %2/z, %1" ) ;; ------------------------------------------------------------------------- ;; ---- Normal gather loads ;; ------------------------------------------------------------------------- ;; Includes gather forms of: ;; - LD1D ;; - LD1W ;; ------------------------------------------------------------------------- ;; Unpredicated gather loads. 
(define_expand "gather_load" [(set (match_operand:SVE_SD 0 "register_operand") (unspec:SVE_SD [(match_dup 5) (match_operand:DI 1 "aarch64_reg_or_zero") (match_operand: 2 "register_operand") (match_operand:DI 3 "const_int_operand") (match_operand:DI 4 "aarch64_gather_scale_operand_") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" { operands[5] = aarch64_ptrue_reg (mode); } ) ;; Predicated gather loads for 32-bit elements. Operand 3 is true for ;; unsigned extension and false for signed extension. (define_insn "mask_gather_load" [(set (match_operand:SVE_S 0 "register_operand" "=w, w, w, w, w") (unspec:SVE_S [(match_operand: 5 "register_operand" "Upl, Upl, Upl, Upl, Upl") (match_operand:DI 1 "aarch64_reg_or_zero" "Z, rk, rk, rk, rk") (match_operand: 2 "register_operand" "w, w, w, w, w") (match_operand:DI 3 "const_int_operand" "i, Z, Ui1, Z, Ui1") (match_operand:DI 4 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, i, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" "@ ld1w\t%0.s, %5/z, [%2.s] ld1w\t%0.s, %5/z, [%1, %2.s, sxtw] ld1w\t%0.s, %5/z, [%1, %2.s, uxtw] ld1w\t%0.s, %5/z, [%1, %2.s, sxtw %p4] ld1w\t%0.s, %5/z, [%1, %2.s, uxtw %p4]" ) ;; Predicated gather loads for 64-bit elements. The value of operand 3 ;; doesn't matter in this case. (define_insn "mask_gather_load" [(set (match_operand:SVE_D 0 "register_operand" "=w, w, w") (unspec:SVE_D [(match_operand: 5 "register_operand" "Upl, Upl, Upl") (match_operand:DI 1 "aarch64_reg_or_zero" "Z, rk, rk") (match_operand: 2 "register_operand" "w, w, w") (match_operand:DI 3 "const_int_operand") (match_operand:DI 4 "aarch64_gather_scale_operand_d" "Ui1, Ui1, i") (mem:BLK (scratch))] UNSPEC_LD1_GATHER))] "TARGET_SVE" "@ ld1d\t%0.d, %5/z, [%2.d] ld1d\t%0.d, %5/z, [%1, %2.d] ld1d\t%0.d, %5/z, [%1, %2.d, lsl %p4]" ) ;; ========================================================================= ;; == Stores ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- Normal contiguous stores ;; ------------------------------------------------------------------------- ;; Includes contiguous forms of: ;; - ST1B ;; - ST1D ;; - ST1H ;; - ST1W ;; - ST2B ;; - ST2D ;; - ST2H ;; - ST2W ;; - ST3B ;; - ST3D ;; - ST3H ;; - ST3W ;; - ST4B ;; - ST4D ;; - ST4H ;; - ST4W ;; ------------------------------------------------------------------------- ;; Predicated ST1. (define_insn "maskstore" [(set (match_operand:SVE_ALL 0 "memory_operand" "+m") (unspec:SVE_ALL [(match_operand: 2 "register_operand" "Upl") (match_operand:SVE_ALL 1 "register_operand" "w") (match_dup 0)] UNSPEC_ST1_SVE))] "TARGET_SVE" "st1\t%1., %2, %0" ) ;; Unpredicated ST[234]. This is always a full update, so the dependence ;; on the old value of the memory location (via (match_dup 0)) is redundant. ;; There doesn't seem to be any obvious benefit to treating the all-true ;; case differently though. In particular, it's very unlikely that we'll ;; only find out during RTL that a store_lanes is dead. (define_expand "vec_store_lanes" [(set (match_operand:SVE_STRUCT 0 "memory_operand") (unspec:SVE_STRUCT [(match_dup 2) (match_operand:SVE_STRUCT 1 "register_operand") (match_dup 0)] UNSPEC_STN))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Predicated ST[234]. 
(define_insn "vec_mask_store_lanes" [(set (match_operand:SVE_STRUCT 0 "memory_operand" "+m") (unspec:SVE_STRUCT [(match_operand: 2 "register_operand" "Upl") (match_operand:SVE_STRUCT 1 "register_operand" "w") (match_dup 0)] UNSPEC_STN))] "TARGET_SVE" "st\t%1, %2, %0" ) ;; ------------------------------------------------------------------------- ;; ---- Normal scatter stores ;; ------------------------------------------------------------------------- ;; Includes scatter forms of: ;; - ST1D ;; - ST1W ;; ------------------------------------------------------------------------- ;; Unpredicated scatter stores. (define_expand "scatter_store" [(set (mem:BLK (scratch)) (unspec:BLK [(match_dup 5) (match_operand:DI 0 "aarch64_reg_or_zero") (match_operand: 1 "register_operand") (match_operand:DI 2 "const_int_operand") (match_operand:DI 3 "aarch64_gather_scale_operand_") (match_operand:SVE_SD 4 "register_operand")] UNSPEC_ST1_SCATTER))] "TARGET_SVE" { operands[5] = aarch64_ptrue_reg (mode); } ) ;; Predicated scatter stores for 32-bit elements. Operand 2 is true for ;; unsigned extension and false for signed extension. (define_insn "mask_scatter_store" [(set (mem:BLK (scratch)) (unspec:BLK [(match_operand: 5 "register_operand" "Upl, Upl, Upl, Upl, Upl") (match_operand:DI 0 "aarch64_reg_or_zero" "Z, rk, rk, rk, rk") (match_operand: 1 "register_operand" "w, w, w, w, w") (match_operand:DI 2 "const_int_operand" "i, Z, Ui1, Z, Ui1") (match_operand:DI 3 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, i, i") (match_operand:SVE_S 4 "register_operand" "w, w, w, w, w")] UNSPEC_ST1_SCATTER))] "TARGET_SVE" "@ st1w\t%4.s, %5, [%1.s] st1w\t%4.s, %5, [%0, %1.s, sxtw] st1w\t%4.s, %5, [%0, %1.s, uxtw] st1w\t%4.s, %5, [%0, %1.s, sxtw %p3] st1w\t%4.s, %5, [%0, %1.s, uxtw %p3]" ) ;; Predicated scatter stores for 64-bit elements. The value of operand 2 ;; doesn't matter in this case. (define_insn "mask_scatter_store" [(set (mem:BLK (scratch)) (unspec:BLK [(match_operand: 5 "register_operand" "Upl, Upl, Upl") (match_operand:DI 0 "aarch64_reg_or_zero" "Z, rk, rk") (match_operand: 1 "register_operand" "w, w, w") (match_operand:DI 2 "const_int_operand") (match_operand:DI 3 "aarch64_gather_scale_operand_d" "Ui1, Ui1, i") (match_operand:SVE_D 4 "register_operand" "w, w, w")] UNSPEC_ST1_SCATTER))] "TARGET_SVE" "@ st1d\t%4.d, %5, [%1.d] st1d\t%4.d, %5, [%0, %1.d] st1d\t%4.d, %5, [%0, %1.d, lsl %p3]" ) ;; ========================================================================= ;; == Vector creation ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Duplicate element ;; ------------------------------------------------------------------------- ;; Includes: ;; - MOV ;; - LD1RB ;; - LD1RD ;; - LD1RH ;; - LD1RW ;; - LD1RQB ;; - LD1RQD ;; - LD1RQH ;; - LD1RQW ;; ------------------------------------------------------------------------- (define_expand "vec_duplicate" [(parallel [(set (match_operand:SVE_ALL 0 "register_operand") (vec_duplicate:SVE_ALL (match_operand: 1 "aarch64_sve_dup_operand"))) (clobber (scratch:))])] "TARGET_SVE" { if (MEM_P (operands[1])) { rtx ptrue = aarch64_ptrue_reg (mode); emit_insn (gen_sve_ld1r (operands[0], ptrue, operands[1], CONST0_RTX (mode))); DONE; } } ) ;; Accept memory operands for the benefit of combine, and also in case ;; the scalar input gets spilled to memory during RA. 
;; We want to split
;; the load at the first opportunity in order to allow the PTRUE to be
;; optimized with surrounding code.
(define_insn_and_split "*vec_duplicate<mode>_reg"
  [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w")
	(vec_duplicate:SVE_ALL
	  (match_operand:<VEL> 1 "aarch64_sve_dup_operand" "r, w, Uty")))
   (clobber (match_scratch:<VPRED> 2 "=X, X, Upl"))]
  "TARGET_SVE"
  "@
   mov\t%0.<Vetype>, %<vwcore>1
   mov\t%0.<Vetype>, %<Vetype>1
   #"
  "&& MEM_P (operands[1])"
  [(const_int 0)]
  {
    if (GET_CODE (operands[2]) == SCRATCH)
      operands[2] = gen_reg_rtx (<VPRED>mode);
    emit_move_insn (operands[2], CONSTM1_RTX (<VPRED>mode));
    emit_insn (gen_sve_ld1r<mode> (operands[0], operands[2], operands[1],
				   CONST0_RTX (<MODE>mode)));
    DONE;
  }
  [(set_attr "length" "4,4,8")]
)

;; This is used for vec_duplicates from memory, but can also
;; be used by combine to optimize selects of a vec_duplicate
;; with zero.
(define_insn "sve_ld1r<mode>"
  [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
	(unspec:SVE_ALL
	  [(match_operand:<VPRED> 1 "register_operand" "Upl")
	   (vec_duplicate:SVE_ALL
	     (match_operand:<VEL> 2 "aarch64_sve_ld1r_operand" "Uty"))
	   (match_operand:SVE_ALL 3 "aarch64_simd_imm_zero")]
	  UNSPEC_SEL))]
  "TARGET_SVE"
  "ld1r<Vesize>\t%0.<Vetype>, %1/z, %2"
)

;; Load 128 bits from memory and duplicate to fill a vector.  Since there
;; are so few operations on 128-bit "elements", we don't define a VNx1TI
;; and simply use vectors of bytes instead.
(define_insn "*sve_ld1rq<Vesize>"
  [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
	(unspec:SVE_ALL
	  [(match_operand:<VPRED> 1 "register_operand" "Upl")
	   (match_operand:TI 2 "aarch64_sve_ld1r_operand" "Uty")]
	  UNSPEC_LD1RQ))]
  "TARGET_SVE"
  "ld1rq<Vesize>\t%0.<Vetype>, %1/z, %2"
)

;; -------------------------------------------------------------------------
;; ---- [INT,FP] Initialize from individual elements
;; -------------------------------------------------------------------------
;; Includes:
;; - INSR
;; -------------------------------------------------------------------------

(define_expand "vec_init<mode>"
  [(match_operand:SVE_ALL 0 "register_operand")
   (match_operand 1 "")]
  "TARGET_SVE"
  {
    aarch64_sve_expand_vector_init (operands[0], operands[1]);
    DONE;
  }
)

;; Shift an SVE vector left and insert a scalar into element 0.
(define_insn "vec_shl_insert_<mode>"
  [(set (match_operand:SVE_ALL 0 "register_operand" "=?w, w, ??&w, ?&w")
	(unspec:SVE_ALL
	  [(match_operand:SVE_ALL 1 "register_operand" "0, 0, w, w")
	   (match_operand:<VEL> 2 "aarch64_reg_or_zero" "rZ, w, rZ, w")]
	  UNSPEC_INSR))]
  "TARGET_SVE"
  "@
   insr\t%0.<Vetype>, %<vwcore>2
   insr\t%0.<Vetype>, %<Vetype>2
   movprfx\t%0, %1\;insr\t%0.<Vetype>, %<vwcore>2
   movprfx\t%0, %1\;insr\t%0.<Vetype>, %<Vetype>2"
  [(set_attr "movprfx" "*,*,yes,yes")]
)

;; -------------------------------------------------------------------------
;; ---- [INT] Linear series
;; -------------------------------------------------------------------------
;; Includes:
;; - INDEX
;; -------------------------------------------------------------------------

(define_insn "vec_series<mode>"
  [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w")
	(vec_series:SVE_I
	  (match_operand:<VEL> 1 "aarch64_sve_index_operand" "Usi, r, r")
	  (match_operand:<VEL> 2 "aarch64_sve_index_operand" "r, Usi, r")))]
  "TARGET_SVE"
  "@
   index\t%0.<Vetype>, #%1, %<vw>2
   index\t%0.<Vetype>, %<vw>1, #%2
   index\t%0.<Vetype>, %<vw>1, %<vw>2"
)

;; Optimize {x, x, x, x, ...} + {0, n, 2*n, 3*n, ...} if n is in range
;; of an INDEX instruction.
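;; For example, with n == 4, { x, x, x, x, ... } + { 0, 4, 8, 12, ... }
;; is itself a linear series with base x and step 4, so (assuming x is
;; in w0 and 32-bit elements) the sum can become the single instruction
;;
;;	index	z0.s, w0, #4
;;
;; rather than a vector duplicate followed by an add.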
(define_insn "*vec_series_plus" [(set (match_operand:SVE_I 0 "register_operand" "=w") (plus:SVE_I (vec_duplicate:SVE_I (match_operand: 1 "register_operand" "r")) (match_operand:SVE_I 2 "immediate_operand")))] "TARGET_SVE && aarch64_check_zero_based_sve_index_immediate (operands[2])" { operands[2] = aarch64_check_zero_based_sve_index_immediate (operands[2]); return "index\t%0., %1, #%2"; } ) ;; ------------------------------------------------------------------------- ;; ---- [PRED] Duplicate element ;; ------------------------------------------------------------------------- ;; The patterns in this section are synthetic. ;; ------------------------------------------------------------------------- ;; Implement a predicate broadcast by shifting the low bit of the scalar ;; input into the top bit and using a WHILELO. An alternative would be to ;; duplicate the input and do a compare with zero. (define_expand "vec_duplicate" [(set (match_operand:PRED_ALL 0 "register_operand") (vec_duplicate:PRED_ALL (match_operand 1 "register_operand")))] "TARGET_SVE" { rtx tmp = gen_reg_rtx (DImode); rtx op1 = gen_lowpart (DImode, operands[1]); emit_insn (gen_ashldi3 (tmp, op1, gen_int_mode (63, DImode))); emit_insn (gen_while_ultdi (operands[0], const0_rtx, tmp)); DONE; } ) ;; ========================================================================= ;; == Vector decomposition ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Extract index ;; ------------------------------------------------------------------------- ;; Includes: ;; - DUP (Advanced SIMD) ;; - DUP (SVE) ;; - EXT (SVE) ;; - ST1 (Advanced SIMD) ;; - UMOV (Advanced SIMD) ;; ------------------------------------------------------------------------- (define_expand "vec_extract" [(set (match_operand: 0 "register_operand") (vec_select: (match_operand:SVE_ALL 1 "register_operand") (parallel [(match_operand:SI 2 "nonmemory_operand")])))] "TARGET_SVE" { poly_int64 val; if (poly_int_rtx_p (operands[2], &val) && known_eq (val, GET_MODE_NUNITS (mode) - 1)) { /* The last element can be extracted with a LASTB and a false predicate. */ rtx sel = aarch64_pfalse_reg (mode); emit_insn (gen_extract_last_ (operands[0], sel, operands[1])); DONE; } if (!CONST_INT_P (operands[2])) { /* Create an index with operand[2] as the base and -1 as the step. It will then be zero for the element we care about. */ rtx index = gen_lowpart (mode, operands[2]); index = force_reg (mode, index); rtx series = gen_reg_rtx (mode); emit_insn (gen_vec_series (series, index, constm1_rtx)); /* Get a predicate that is true for only that element. */ rtx zero = CONST0_RTX (mode); rtx cmp = gen_rtx_EQ (mode, series, zero); rtx sel = gen_reg_rtx (mode); emit_insn (gen_vec_cmp (sel, cmp, series, zero)); /* Select the element using LASTB. */ emit_insn (gen_extract_last_ (operands[0], sel, operands[1])); DONE; } } ) ;; Extract element zero. This is a special case because we want to force ;; the registers to be the same for the second alternative, and then ;; split the instruction into nothing after RA. 
(define_insn_and_split "*vec_extract_0" [(set (match_operand: 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv") (vec_select: (match_operand:SVE_ALL 1 "register_operand" "w, 0, w") (parallel [(const_int 0)])))] "TARGET_SVE" { operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); switch (which_alternative) { case 0: return "umov\\t%0, %1.[0]"; case 1: return "#"; case 2: return "st1\\t{%1.}[0], %0"; default: gcc_unreachable (); } } "&& reload_completed && REG_P (operands[0]) && REGNO (operands[0]) == REGNO (operands[1])" [(const_int 0)] { emit_note (NOTE_INSN_DELETED); DONE; } [(set_attr "type" "neon_to_gp_q, untyped, neon_store1_one_lane_q")] ) ;; Extract an element from the Advanced SIMD portion of the register. ;; We don't just reuse the aarch64-simd.md pattern because we don't ;; want any change in lane number on big-endian targets. (define_insn "*vec_extract_v128" [(set (match_operand: 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv") (vec_select: (match_operand:SVE_ALL 1 "register_operand" "w, w, w") (parallel [(match_operand:SI 2 "const_int_operand")])))] "TARGET_SVE && IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (mode), 1, 15)" { operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); switch (which_alternative) { case 0: return "umov\\t%0, %1.[%2]"; case 1: return "dup\\t%0, %1.[%2]"; case 2: return "st1\\t{%1.}[%2], %0"; default: gcc_unreachable (); } } [(set_attr "type" "neon_to_gp_q, neon_dup_q, neon_store1_one_lane_q")] ) ;; Extract an element in the range of DUP. This pattern allows the ;; source and destination to be different. (define_insn "*vec_extract_dup" [(set (match_operand: 0 "register_operand" "=w") (vec_select: (match_operand:SVE_ALL 1 "register_operand" "w") (parallel [(match_operand:SI 2 "const_int_operand")])))] "TARGET_SVE && IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (mode), 16, 63)" { operands[0] = gen_rtx_REG (mode, REGNO (operands[0])); return "dup\t%0., %1.[%2]"; } ) ;; Extract an element outside the range of DUP. This pattern requires the ;; source and destination to be the same. (define_insn "*vec_extract_ext" [(set (match_operand: 0 "register_operand" "=w") (vec_select: (match_operand:SVE_ALL 1 "register_operand" "0") (parallel [(match_operand:SI 2 "const_int_operand")])))] "TARGET_SVE && INTVAL (operands[2]) * GET_MODE_SIZE (mode) >= 64" { operands[0] = gen_rtx_REG (mode, REGNO (operands[0])); operands[2] = GEN_INT (INTVAL (operands[2]) * GET_MODE_SIZE (mode)); return "ext\t%0.b, %0.b, %0.b, #%2"; } ) ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Extract active element ;; ------------------------------------------------------------------------- ;; Includes: ;; - LASTB ;; ------------------------------------------------------------------------- ;; Extract the last active element of operand 1 into operand 0. ;; If no elements are active, extract the last inactive element instead. (define_insn "extract_last_" [(set (match_operand: 0 "register_operand" "=r, w") (unspec: [(match_operand: 1 "register_operand" "Upl, Upl") (match_operand:SVE_ALL 2 "register_operand" "w, w")] UNSPEC_LASTB))] "TARGET_SVE" "@ lastb\t%0, %1, %2. lastb\t%0, %1, %2." ) ;; ------------------------------------------------------------------------- ;; ---- [PRED] Extract index ;; ------------------------------------------------------------------------- ;; The patterns in this section are synthetic. 
;; ------------------------------------------------------------------------- ;; Handle extractions from a predicate by converting to an integer vector ;; and extracting from there. (define_expand "vec_extract" [(match_operand: 0 "register_operand") (match_operand: 1 "register_operand") (match_operand:SI 2 "nonmemory_operand") ;; Dummy operand to which we can attach the iterator. (reg:SVE_I V0_REGNUM)] "TARGET_SVE" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_aarch64_sve_dup_const (tmp, operands[1], CONST1_RTX (mode), CONST0_RTX (mode))); emit_insn (gen_vec_extract (operands[0], tmp, operands[2])); DONE; } ) ;; ========================================================================= ;; == Unary arithmetic ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT] General unary arithmetic corresponding to rtx codes ;; ------------------------------------------------------------------------- ;; Includes: ;; - ABS ;; - CNT (= popcount) ;; - NEG ;; - NOT ;; ------------------------------------------------------------------------- ;; Unpredicated integer unary arithmetic. (define_expand "2" [(set (match_operand:SVE_I 0 "register_operand") (unspec:SVE_I [(match_dup 2) (SVE_INT_UNARY:SVE_I (match_operand:SVE_I 1 "register_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Integer unary arithmetic predicated with a PTRUE. (define_insn "*2" [(set (match_operand:SVE_I 0 "register_operand" "=w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl") (SVE_INT_UNARY:SVE_I (match_operand:SVE_I 2 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "\t%0., %1/m, %2." ) ;; ------------------------------------------------------------------------- ;; ---- [FP] General unary arithmetic corresponding to unspecs ;; ------------------------------------------------------------------------- ;; Includes: ;; - FABS ;; - FNEG ;; - FRINTA ;; - FRINTI ;; - FRINTM ;; - FRINTN ;; - FRINTP ;; - FRINTX ;; - FRINTZ ;; - FSQRT ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point unary operations. (define_expand "2" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 2) (match_operand:SVE_F 1 "register_operand")] SVE_COND_FP_UNARY))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Predicated floating-point unary operations. (define_insn "*2" [(set (match_operand:SVE_F 0 "register_operand" "=w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl") (match_operand:SVE_F 2 "register_operand" "w")] SVE_COND_FP_UNARY))] "TARGET_SVE" "\t%0., %1/m, %2." ) ;; ------------------------------------------------------------------------- ;; ---- [PRED] Inverse ;; ------------------------------------------------------------------------- ;; Includes: ;; - NOT ;; ------------------------------------------------------------------------- ;; Unpredicated predicate inverse. (define_expand "one_cmpl2" [(set (match_operand:PRED_ALL 0 "register_operand") (and:PRED_ALL (not:PRED_ALL (match_operand:PRED_ALL 1 "register_operand")) (match_dup 2)))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Predicated predicate inverse. 
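;; As an illustrative sketch, inverting p1 under a PTRUE in p2 is a
;; single zeroing-predicated NOT:
;;
;;	not	p0.b, p2/z, p1.b
;;
;; the AND with the PTRUE in the rtl template below models NOT's
;; zeroing predication.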
(define_insn "*one_cmpl3" [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (and:PRED_ALL (not:PRED_ALL (match_operand:PRED_ALL 2 "register_operand" "Upa")) (match_operand:PRED_ALL 1 "register_operand" "Upa")))] "TARGET_SVE" "not\t%0.b, %1/z, %2.b" ) ;; ========================================================================= ;; == Binary arithmetic ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT] General binary arithmetic corresponding to rtx codes ;; ------------------------------------------------------------------------- ;; Includes merging patterns for: ;; - ADD ;; - AND ;; - EOR ;; - MUL ;; - ORR ;; - SMAX ;; - SMIN ;; - SUB ;; - UMAX ;; - UMIN ;; ------------------------------------------------------------------------- ;; Predicated integer operations with merging. (define_expand "cond_" [(set (match_operand:SVE_I 0 "register_operand") (unspec:SVE_I [(match_operand: 1 "register_operand") (SVE_INT_BINARY:SVE_I (match_operand:SVE_I 2 "register_operand") (match_operand:SVE_I 3 "register_operand")) (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" ) ;; Predicated integer operations, merging with the first input. (define_insn "*cond__2" [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl") (SVE_INT_BINARY:SVE_I (match_operand:SVE_I 2 "register_operand" "0, w") (match_operand:SVE_I 3 "register_operand" "w, w")) (match_dup 2)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %0., %3. movprfx\t%0, %2\;\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,yes")] ) ;; Predicated integer operations, merging with the second input. (define_insn "*cond__3" [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl") (SVE_INT_BINARY:SVE_I (match_operand:SVE_I 2 "register_operand" "w, w") (match_operand:SVE_I 3 "register_operand" "0, w")) (match_dup 3)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %0., %2. movprfx\t%0, %3\;\t%0., %1/m, %0., %2." [(set_attr "movprfx" "*,yes")] ) ;; Predicated integer operations, merging with an independent value. (define_insn_and_rewrite "*cond__any" [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl, Upl") (SVE_INT_BINARY:SVE_I (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w") (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w")) (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")] UNSPEC_SEL))] "TARGET_SVE && !rtx_equal_p (operands[2], operands[4]) && !rtx_equal_p (operands[3], operands[4])" "@ movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %3. movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %2. movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3. movprfx\t%0., %1/m, %2.\;\t%0., %1/m, %0., %3. 
#" "&& reload_completed && register_operand (operands[4], mode) && !rtx_equal_p (operands[0], operands[4])" { emit_insn (gen_vcond_mask_ (operands[0], operands[2], operands[4], operands[1])); operands[4] = operands[2] = operands[0]; } [(set_attr "movprfx" "yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Addition ;; ------------------------------------------------------------------------- ;; Includes: ;; - ADD ;; - DECB ;; - DECD ;; - DECH ;; - DECW ;; - INCB ;; - INCD ;; - INCH ;; - INCW ;; - SUB ;; ------------------------------------------------------------------------- (define_insn "add3" [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, w") (plus:SVE_I (match_operand:SVE_I 1 "register_operand" "%0, 0, 0, w") (match_operand:SVE_I 2 "aarch64_sve_add_operand" "vsa, vsn, vsi, w")))] "TARGET_SVE" "@ add\t%0., %0., #%D2 sub\t%0., %0., #%N2 * return aarch64_output_sve_inc_dec_immediate (\"%0.\", operands[2]); add\t%0., %1., %2." ) ;; Merging forms are handled through SVE_INT_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [INT] Subtraction ;; ------------------------------------------------------------------------- ;; Includes: ;; - SUB ;; - SUBR ;; ------------------------------------------------------------------------- (define_insn "sub3" [(set (match_operand:SVE_I 0 "register_operand" "=w, w") (minus:SVE_I (match_operand:SVE_I 1 "aarch64_sve_arith_operand" "w, vsa") (match_operand:SVE_I 2 "register_operand" "w, 0")))] "TARGET_SVE" "@ sub\t%0., %1., %2. subr\t%0., %0., #%D1" ) ;; Merging forms are handled through SVE_INT_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [INT] Absolute difference ;; ------------------------------------------------------------------------- ;; Includes: ;; - SABD ;; - UABD ;; ------------------------------------------------------------------------- ;; Unpredicated integer absolute difference. (define_expand "abd_3" [(use (match_operand:SVE_I 0 "register_operand")) (USMAX:SVE_I (match_operand:SVE_I 1 "register_operand") (match_operand:SVE_I 2 "register_operand"))] "TARGET_SVE" { rtx pred = aarch64_ptrue_reg (mode); emit_insn (gen_aarch64_abd_3 (operands[0], pred, operands[1], operands[2])); DONE; } ) ;; Predicated integer absolute difference. (define_insn "aarch64_abd_3" [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl") (minus:SVE_I (USMAX:SVE_I (match_operand:SVE_I 2 "register_operand" "0, w") (match_operand:SVE_I 3 "register_operand" "w, w")) (:SVE_I (match_dup 2) (match_dup 3)))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ abd\t%0., %1/m, %0., %3. movprfx\t%0, %2\;abd\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Multiplication ;; ------------------------------------------------------------------------- ;; Includes: ;; - MUL ;; ------------------------------------------------------------------------- ;; Unpredicated multiplication. (define_expand "mul3" [(set (match_operand:SVE_I 0 "register_operand") (unspec:SVE_I [(match_dup 3) (mult:SVE_I (match_operand:SVE_I 1 "register_operand") (match_operand:SVE_I 2 "aarch64_sve_mul_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Multiplication predicated with a PTRUE. 
We don't actually need the ;; predicate for the first alternative, but using Upa or X isn't likely ;; to gain much and would make the instruction seem less uniform to the ;; register allocator. (define_insn_and_split "*mul3" [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, 0, w") (match_operand:SVE_I 3 "aarch64_sve_mul_operand" "vsm, w, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ # mul\t%0., %1/m, %0., %3. movprfx\t%0, %2\;mul\t%0., %1/m, %0., %3." ; Split the unpredicated form after reload, so that we don't have ; the unnecessary PTRUE. "&& reload_completed && !register_operand (operands[3], mode)" [(set (match_dup 0) (mult:SVE_I (match_dup 2) (match_dup 3)))] "" [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated multiplications by a constant (post-RA only). ;; These are generated by splitting a predicated instruction whose ;; predicate is unused. (define_insn "*post_ra_mul3" [(set (match_operand:SVE_I 0 "register_operand" "=w") (mult:SVE_I (match_operand:SVE_I 1 "register_operand" "0") (match_operand:SVE_I 2 "aarch64_sve_mul_immediate")))] "TARGET_SVE && reload_completed" "mul\t%0., %0., #%2" ) ;; Merging forms are handled through SVE_INT_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [INT] Highpart multiplication ;; ------------------------------------------------------------------------- ;; Includes: ;; - SMULH ;; - UMULH ;; ------------------------------------------------------------------------- ;; Unpredicated highpart multiplication. (define_expand "mul3_highpart" [(set (match_operand:SVE_I 0 "register_operand") (unspec:SVE_I [(match_dup 3) (unspec:SVE_I [(match_operand:SVE_I 1 "register_operand") (match_operand:SVE_I 2 "register_operand")] MUL_HIGHPART)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Predicated highpart multiplication. (define_insn "*mul3_highpart" [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl") (unspec:SVE_I [(match_operand:SVE_I 2 "register_operand" "%0, w") (match_operand:SVE_I 3 "register_operand" "w, w")] MUL_HIGHPART)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ mulh\t%0., %1/m, %0., %3. movprfx\t%0, %2\;mulh\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Division ;; ------------------------------------------------------------------------- ;; Includes: ;; - SDIV ;; - SDIVR ;; - UDIV ;; - UDIVR ;; ------------------------------------------------------------------------- ;; Unpredicated integer division. (define_expand "3" [(set (match_operand:SVE_SDI 0 "register_operand") (unspec:SVE_SDI [(match_dup 3) (SVE_INT_BINARY_SD:SVE_SDI (match_operand:SVE_SDI 1 "register_operand") (match_operand:SVE_SDI 2 "register_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Integer division predicated with a PTRUE. (define_insn "*3" [(set (match_operand:SVE_SDI 0 "register_operand" "=w, w, ?&w") (unspec:SVE_SDI [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (SVE_INT_BINARY_SD:SVE_SDI (match_operand:SVE_SDI 2 "register_operand" "0, w, w") (match_operand:SVE_SDI 3 "aarch64_sve_mul_operand" "w, 0, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ \t%0., %1/m, %0., %3. r\t%0., %1/m, %0., %2. movprfx\t%0, %2\;\t%0., %1/m, %0., %3." 
[(set_attr "movprfx" "*,*,yes")] ) ;; Predicated integer division with merging. (define_expand "cond_" [(set (match_operand:SVE_SDI 0 "register_operand") (unspec:SVE_SDI [(match_operand: 1 "register_operand") (SVE_INT_BINARY_SD:SVE_SDI (match_operand:SVE_SDI 2 "register_operand") (match_operand:SVE_SDI 3 "register_operand")) (match_operand:SVE_SDI 4 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" ) ;; Predicated integer division, merging with the first input. (define_insn "*cond__2" [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w") (unspec:SVE_SDI [(match_operand: 1 "register_operand" "Upl, Upl") (SVE_INT_BINARY_SD:SVE_SDI (match_operand:SVE_SDI 2 "register_operand" "0, w") (match_operand:SVE_SDI 3 "register_operand" "w, w")) (match_dup 2)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %0., %3. movprfx\t%0, %2\;\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,yes")] ) ;; Predicated integer division, merging with the second input. (define_insn "*cond__3" [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w") (unspec:SVE_SDI [(match_operand: 1 "register_operand" "Upl, Upl") (SVE_INT_BINARY_SD:SVE_SDI (match_operand:SVE_SDI 2 "register_operand" "w, w") (match_operand:SVE_SDI 3 "register_operand" "0, w")) (match_dup 3)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %0., %2. movprfx\t%0, %3\;\t%0., %1/m, %0., %2." [(set_attr "movprfx" "*,yes")] ) ;; Predicated integer division, merging with an independent value. (define_insn_and_rewrite "*cond__any" [(set (match_operand:SVE_SDI 0 "register_operand" "=&w, &w, &w, &w, ?&w") (unspec:SVE_SDI [(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl, Upl") (SVE_INT_BINARY_SD:SVE_SDI (match_operand:SVE_SDI 2 "register_operand" "0, w, w, w, w") (match_operand:SVE_SDI 3 "register_operand" "w, 0, w, w, w")) (match_operand:SVE_SDI 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")] UNSPEC_SEL))] "TARGET_SVE && !rtx_equal_p (operands[2], operands[4]) && !rtx_equal_p (operands[3], operands[4])" "@ movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %3. movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %2. movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3. movprfx\t%0., %1/m, %2.\;\t%0., %1/m, %0., %3. #" "&& reload_completed && register_operand (operands[4], mode) && !rtx_equal_p (operands[0], operands[4])" { emit_insn (gen_vcond_mask_ (operands[0], operands[2], operands[4], operands[1])); operands[4] = operands[2] = operands[0]; } [(set_attr "movprfx" "yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Binary logical operations ;; ------------------------------------------------------------------------- ;; Includes: ;; - AND ;; - EOR ;; - ORR ;; ------------------------------------------------------------------------- ;; Unpredicated integer binary logical operations. (define_insn "3" [(set (match_operand:SVE_I 0 "register_operand" "=w, w") (LOGICAL:SVE_I (match_operand:SVE_I 1 "register_operand" "%0, w") (match_operand:SVE_I 2 "aarch64_sve_logical_operand" "vsl, w")))] "TARGET_SVE" "@ \t%0., %0., #%C2 \t%0.d, %1.d, %2.d" ) ;; Merging forms are handled through SVE_INT_BINARY. 
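;; As an illustrative sketch, ANDing each element with a bitmask
;; immediate such as 0xff uses the immediate alternative above:
;;
;;	and	z0.s, z0.s, #0xff
;;
;; while the register-register alternative always uses .d, since the
;; operation is bitwise and therefore element-size-agnostic.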
;; ------------------------------------------------------------------------- ;; ---- [INT] Binary logical operations (inverted second input) ;; ------------------------------------------------------------------------- ;; Includes: ;; - BIC ;; ------------------------------------------------------------------------- ;; REG_EQUAL notes on "not3" should ensure that we can generate ;; this pattern even though the NOT instruction itself is predicated. (define_insn "bic3" [(set (match_operand:SVE_I 0 "register_operand" "=w") (and:SVE_I (not:SVE_I (match_operand:SVE_I 1 "register_operand" "w")) (match_operand:SVE_I 2 "register_operand" "w")))] "TARGET_SVE" "bic\t%0.d, %2.d, %1.d" ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Shifts ;; ------------------------------------------------------------------------- ;; Includes: ;; - ASR ;; - LSL ;; - LSR ;; ------------------------------------------------------------------------- ;; Unpredicated shift by a scalar, which expands into one of the vector ;; shifts below. (define_expand "3" [(set (match_operand:SVE_I 0 "register_operand") (ASHIFT:SVE_I (match_operand:SVE_I 1 "register_operand") (match_operand: 2 "general_operand")))] "TARGET_SVE" { rtx amount; if (CONST_INT_P (operands[2])) { amount = gen_const_vec_duplicate (mode, operands[2]); if (!aarch64_sve_shift_operand (operands[2], mode)) amount = force_reg (mode, amount); } else { amount = gen_reg_rtx (mode); emit_insn (gen_vec_duplicate (amount, convert_to_mode (mode, operands[2], 0))); } emit_insn (gen_v3 (operands[0], operands[1], amount)); DONE; } ) ;; Unpredicated shift by a vector. (define_expand "v3" [(set (match_operand:SVE_I 0 "register_operand") (unspec:SVE_I [(match_dup 3) (ASHIFT:SVE_I (match_operand:SVE_I 1 "register_operand") (match_operand:SVE_I 2 "aarch64_sve_shift_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Shift by a vector, predicated with a PTRUE. We don't actually need ;; the predicate for the first alternative, but using Upa or X isn't ;; likely to gain much and would make the instruction seem less uniform ;; to the register allocator. (define_insn_and_split "*v3" [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (ASHIFT:SVE_I (match_operand:SVE_I 2 "register_operand" "w, 0, w") (match_operand:SVE_I 3 "aarch64_sve_shift_operand" "D, w, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ # \t%0., %1/m, %0., %3. movprfx\t%0, %2\;\t%0., %1/m, %0., %3." "&& reload_completed && !register_operand (operands[3], mode)" [(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))] "" [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated shift operations by a constant (post-RA only). ;; These are generated by splitting a predicated instruction whose ;; predicate is unused. (define_insn "*post_ra_v3" [(set (match_operand:SVE_I 0 "register_operand" "=w") (ASHIFT:SVE_I (match_operand:SVE_I 1 "register_operand" "w") (match_operand:SVE_I 2 "aarch64_simd_shift_imm")))] "TARGET_SVE && reload_completed" "\t%0., %1., #%2" ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Maximum and minimum ;; ------------------------------------------------------------------------- ;; Includes: ;; - SMAX ;; - SMIN ;; - UMAX ;; - UMIN ;; ------------------------------------------------------------------------- ;; Unpredicated integer MAX/MIN. 
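;; As an illustrative sketch, a signed maximum of two vectors of ints
;; is expected to expand to a PTRUE-predicated SMAX:
;;
;;	ptrue	p0.s, all
;;	smax	z0.s, p0/m, z0.s, z1.s
;;
;; with the predicate supplied by the expander below.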
(define_expand "3" [(set (match_operand:SVE_I 0 "register_operand") (unspec:SVE_I [(match_dup 3) (MAXMIN:SVE_I (match_operand:SVE_I 1 "register_operand") (match_operand:SVE_I 2 "register_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Integer MAX/MIN predicated with a PTRUE. (define_insn "*3" [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl") (MAXMIN:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w") (match_operand:SVE_I 3 "register_operand" "w, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ \t%0., %1/m, %0., %3. movprfx\t%0, %2\;\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,yes")] ) ;; Merging forms are handled through SVE_INT_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [FP] General binary arithmetic corresponding to rtx codes ;; ------------------------------------------------------------------------- ;; Includes post-RA forms of: ;; - FADD ;; - FMUL ;; - FSUB ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point binary operations (post-RA only). ;; These are generated by splitting a predicated instruction whose ;; predicate is unused. (define_insn "*post_ra_3" [(set (match_operand:SVE_F 0 "register_operand" "=w") (SVE_UNPRED_FP_BINARY:SVE_F (match_operand:SVE_F 1 "register_operand" "w") (match_operand:SVE_F 2 "register_operand" "w")))] "TARGET_SVE && reload_completed" "\t%0., %1., %2.") ;; ------------------------------------------------------------------------- ;; ---- [FP] General binary arithmetic corresponding to unspecs ;; ------------------------------------------------------------------------- ;; Includes merging forms of: ;; - FADD ;; - FDIV ;; - FDIVR ;; - FMAXNM ;; - FMINNM ;; - FMUL ;; - FSUB ;; - FSUBR ;; ------------------------------------------------------------------------- ;; Predicated floating-point operations with merging. (define_expand "cond_" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_operand: 1 "register_operand") (unspec:SVE_F [(match_operand:SVE_F 2 "register_operand") (match_operand:SVE_F 3 "register_operand")] SVE_COND_FP_BINARY) (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" ) ;; Predicated floating-point operations, merging with the first input. (define_insn "*cond__2" [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl") (unspec:SVE_F [(match_operand:SVE_F 2 "register_operand" "0, w") (match_operand:SVE_F 3 "register_operand" "w, w")] SVE_COND_FP_BINARY) (match_dup 2)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %0., %3. movprfx\t%0, %2\;\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,yes")] ) ;; Predicated floating-point operations, merging with the second input. (define_insn "*cond__3" [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl") (unspec:SVE_F [(match_operand:SVE_F 2 "register_operand" "w, w") (match_operand:SVE_F 3 "register_operand" "0, w")] SVE_COND_FP_BINARY) (match_dup 3)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %0., %2. movprfx\t%0, %3\;\t%0., %1/m, %0., %2." [(set_attr "movprfx" "*,yes")] ) ;; Predicated floating-point operations, merging with an independent value. 
(define_insn_and_rewrite "*cond__any" [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, &w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl, Upl") (unspec:SVE_F [(match_operand:SVE_F 2 "register_operand" "0, w, w, w, w") (match_operand:SVE_F 3 "register_operand" "w, 0, w, w, w")] SVE_COND_FP_BINARY) (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")] UNSPEC_SEL))] "TARGET_SVE && !rtx_equal_p (operands[2], operands[4]) && !rtx_equal_p (operands[3], operands[4])" "@ movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %3. movprfx\t%0., %1/z, %0.\;\t%0., %1/m, %0., %2. movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3. movprfx\t%0., %1/m, %2.\;\t%0., %1/m, %0., %3. #" "&& reload_completed && register_operand (operands[4], mode) && !rtx_equal_p (operands[0], operands[4])" { emit_insn (gen_vcond_mask_ (operands[0], operands[2], operands[4], operands[1])); operands[4] = operands[2] = operands[0]; } [(set_attr "movprfx" "yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [FP] Addition ;; ------------------------------------------------------------------------- ;; Includes: ;; - FADD ;; - FSUB ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point addition. (define_expand "add3" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 3) (plus:SVE_F (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "aarch64_sve_float_arith_with_sub_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Floating-point addition predicated with a PTRUE. (define_insn_and_split "*add3" [(set (match_operand:SVE_F 0 "register_operand" "=w, w, w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (plus:SVE_F (match_operand:SVE_F 2 "register_operand" "%0, 0, w") (match_operand:SVE_F 3 "aarch64_sve_float_arith_with_sub_operand" "vsA, vsN, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fadd\t%0., %1/m, %0., #%3 fsub\t%0., %1/m, %0., #%N3 #" ; Split the unpredicated form after reload, so that we don't have ; the unnecessary PTRUE. "&& reload_completed && register_operand (operands[3], mode)" [(set (match_dup 0) (plus:SVE_F (match_dup 2) (match_dup 3)))] ) ;; Merging forms are handled through SVE_COND_FP_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [FP] Subtraction ;; ------------------------------------------------------------------------- ;; Includes: ;; - FADD ;; - FSUB ;; - FSUBR ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point subtraction. (define_expand "sub3" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 3) (minus:SVE_F (match_operand:SVE_F 1 "aarch64_sve_float_arith_operand") (match_operand:SVE_F 2 "register_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Floating-point subtraction predicated with a PTRUE. 
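;; As an illustrative sketch, subtracting a vector from the constant
;; 1.0 can use the reversed form:
;;
;;	fsubr	z0.s, p0/m, z0.s, #1.0
;;
;; which is why the third alternative below ties operand 3 rather than
;; operand 2 to the destination.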
(define_insn_and_split "*sub3" [(set (match_operand:SVE_F 0 "register_operand" "=w, w, w, w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl, Upl, Upl") (minus:SVE_F (match_operand:SVE_F 2 "aarch64_sve_float_arith_operand" "0, 0, vsA, w") (match_operand:SVE_F 3 "aarch64_sve_float_arith_with_sub_operand" "vsA, vsN, 0, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE && (register_operand (operands[2], mode) || register_operand (operands[3], mode))" "@ fsub\t%0., %1/m, %0., #%3 fadd\t%0., %1/m, %0., #%N3 fsubr\t%0., %1/m, %0., #%2 #" ; Split the unpredicated form after reload, so that we don't have ; the unnecessary PTRUE. "&& reload_completed && register_operand (operands[2], mode) && register_operand (operands[3], mode)" [(set (match_dup 0) (minus:SVE_F (match_dup 2) (match_dup 3)))] ) ;; Merging forms are handled through SVE_COND_FP_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [FP] Absolute difference ;; ------------------------------------------------------------------------- ;; Includes: ;; - FABD ;; ------------------------------------------------------------------------- ;; Predicated floating-point absolute difference. (define_insn "*fabd3" [(set (match_operand:SVE_F 0 "register_operand" "=w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl") (minus:SVE_F (match_operand:SVE_F 2 "register_operand" "0") (match_operand:SVE_F 3 "register_operand" "w"))] UNSPEC_COND_FABS))] "TARGET_SVE" "fabd\t%0., %1/m, %2., %3." ) ;; ------------------------------------------------------------------------- ;; ---- [FP] Multiplication ;; ------------------------------------------------------------------------- ;; Includes: ;; - FMUL ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point multiplication. (define_expand "mul3" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 3) (mult:SVE_F (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "aarch64_sve_float_mul_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Floating-point multiplication predicated with a PTRUE. (define_insn_and_split "*mul3" [(set (match_operand:SVE_F 0 "register_operand" "=w, w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl") (mult:SVE_F (match_operand:SVE_F 2 "register_operand" "%0, w") (match_operand:SVE_F 3 "aarch64_sve_float_mul_operand" "vsM, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fmul\t%0., %1/m, %0., #%3 #" ; Split the unpredicated form after reload, so that we don't have ; the unnecessary PTRUE. "&& reload_completed && register_operand (operands[3], mode)" [(set (match_dup 0) (mult:SVE_F (match_dup 2) (match_dup 3)))] ) ;; Merging forms are handled through SVE_COND_FP_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [FP] Division ;; ------------------------------------------------------------------------- ;; Includes: ;; - FDIV ;; - FDIVR ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point division. (define_expand "div3" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 3) (div:SVE_F (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "register_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Floating-point division predicated with a PTRUE. 
(define_insn "*div3" [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (div:SVE_F (match_operand:SVE_F 2 "register_operand" "0, w, w") (match_operand:SVE_F 3 "register_operand" "w, 0, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fdiv\t%0., %1/m, %0., %3. fdivr\t%0., %1/m, %0., %2. movprfx\t%0, %2\;fdiv\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,*,yes")] ) ;; Merging forms are handled through SVE_COND_FP_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [FP] Binary logical operations ;; ------------------------------------------------------------------------- ;; Includes ;; - AND ;; - EOR ;; - ORR ;; ------------------------------------------------------------------------- ;; Binary logical operations on floating-point modes. We avoid subregs ;; by providing this, but we need to use UNSPECs since rtx logical ops ;; aren't defined for floating-point modes. (define_insn "*3" [(set (match_operand:SVE_F 0 "register_operand" "=w") (unspec:SVE_F [(match_operand:SVE_F 1 "register_operand" "w") (match_operand:SVE_F 2 "register_operand" "w")] LOGICALF))] "TARGET_SVE" "\t%0.d, %1.d, %2.d" ) ;; ------------------------------------------------------------------------- ;; ---- [FP] Sign copying ;; ------------------------------------------------------------------------- ;; The patterns in this section are synthetic. ;; ------------------------------------------------------------------------- (define_expand "copysign3" [(match_operand:SVE_F 0 "register_operand") (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "register_operand")] "TARGET_SVE" { rtx sign = gen_reg_rtx (mode); rtx mant = gen_reg_rtx (mode); rtx int_res = gen_reg_rtx (mode); int bits = GET_MODE_UNIT_BITSIZE (mode) - 1; rtx arg1 = lowpart_subreg (mode, operands[1], mode); rtx arg2 = lowpart_subreg (mode, operands[2], mode); emit_insn (gen_and3 (sign, arg2, aarch64_simd_gen_const_vector_dup (mode, HOST_WIDE_INT_M1U << bits))); emit_insn (gen_and3 (mant, arg1, aarch64_simd_gen_const_vector_dup (mode, ~(HOST_WIDE_INT_M1U << bits)))); emit_insn (gen_ior3 (int_res, sign, mant)); emit_move_insn (operands[0], gen_lowpart (mode, int_res)); DONE; } ) (define_expand "xorsign3" [(match_operand:SVE_F 0 "register_operand") (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "register_operand")] "TARGET_SVE" { rtx sign = gen_reg_rtx (mode); rtx int_res = gen_reg_rtx (mode); int bits = GET_MODE_UNIT_BITSIZE (mode) - 1; rtx arg1 = lowpart_subreg (mode, operands[1], mode); rtx arg2 = lowpart_subreg (mode, operands[2], mode); emit_insn (gen_and3 (sign, arg2, aarch64_simd_gen_const_vector_dup (mode, HOST_WIDE_INT_M1U << bits))); emit_insn (gen_xor3 (int_res, arg1, sign)); emit_move_insn (operands[0], gen_lowpart (mode, int_res)); DONE; } ) ;; ------------------------------------------------------------------------- ;; ---- [FP] Maximum and minimum ;; ------------------------------------------------------------------------- ;; Includes: ;; - FMAXNM ;; - FMINNM ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point MAX/MIN (the rtx codes). These are more ;; relaxed than fmax/fmin, but we implement them in the same way. 
(define_expand "3" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 3) (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "register_operand")] SVE_COND_FP_MAXMIN_PUBLIC))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Unpredicated fmax/fmin (the libm functions). (define_expand "3" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 3) (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "register_operand")] SVE_COND_FP_MAXMIN_PUBLIC))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Predicated floating-point maximum/minimum. (define_insn "*3" [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl") (match_operand:SVE_F 2 "register_operand" "%0, w") (match_operand:SVE_F 3 "register_operand" "w, w")] SVE_COND_FP_MAXMIN_PUBLIC))] "TARGET_SVE" "@ \t%0., %1/m, %0., %3. movprfx\t%0, %2\;\t%0., %1/m, %0., %3." [(set_attr "movprfx" "*,yes")] ) ;; Merging forms are handled through SVE_COND_FP_BINARY. ;; ------------------------------------------------------------------------- ;; ---- [PRED] Binary logical operations ;; ------------------------------------------------------------------------- ;; Includes: ;; - AND ;; - ANDS ;; - EOR ;; - EORS ;; - ORR ;; - ORRS ;; ------------------------------------------------------------------------- ;; Predicate AND. We can reuse one of the inputs as the GP. (define_insn "and3" [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (and:PRED_ALL (match_operand:PRED_ALL 1 "register_operand" "Upa") (match_operand:PRED_ALL 2 "register_operand" "Upa")))] "TARGET_SVE" "and\t%0.b, %1/z, %1.b, %2.b" ) ;; Unpredicated predicate EOR and ORR. (define_expand "3" [(set (match_operand:PRED_ALL 0 "register_operand") (and:PRED_ALL (LOGICAL_OR:PRED_ALL (match_operand:PRED_ALL 1 "register_operand") (match_operand:PRED_ALL 2 "register_operand")) (match_dup 3)))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Predicated predicate AND, EOR and ORR. (define_insn "pred_3" [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (and:PRED_ALL (LOGICAL:PRED_ALL (match_operand:PRED_ALL 2 "register_operand" "Upa") (match_operand:PRED_ALL 3 "register_operand" "Upa")) (match_operand:PRED_ALL 1 "register_operand" "Upa")))] "TARGET_SVE" "\t%0.b, %1/z, %2.b, %3.b" ) ;; Perform a logical operation on operands 2 and 3, using operand 1 as ;; the GP (which is known to be a PTRUE). Store the result in operand 0 ;; and set the flags in the same way as for PTEST. The (and ...) in the ;; UNSPEC_PTEST_PTRUE is logically redundant, but means that the tested ;; value is structurally equivalent to rhs of the second set. 
(define_insn "*3_cc" [(set (reg:CC_NZC CC_REGNUM) (unspec:CC_NZC [(match_operand:PRED_ALL 1 "register_operand" "Upa") (and:PRED_ALL (LOGICAL:PRED_ALL (match_operand:PRED_ALL 2 "register_operand" "Upa") (match_operand:PRED_ALL 3 "register_operand" "Upa")) (match_dup 1))] UNSPEC_PTEST_PTRUE)) (set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (and:PRED_ALL (LOGICAL:PRED_ALL (match_dup 2) (match_dup 3)) (match_dup 1)))] "TARGET_SVE" "s\t%0.b, %1/z, %2.b, %3.b" ) ;; ------------------------------------------------------------------------- ;; ---- [PRED] Binary logical operations (inverted second input) ;; ------------------------------------------------------------------------- ;; Includes: ;; - BIC ;; - ORN ;; ------------------------------------------------------------------------- ;; Predicated predicate BIC and ORN. (define_insn "*3" [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (and:PRED_ALL (NLOGICAL:PRED_ALL (not:PRED_ALL (match_operand:PRED_ALL 2 "register_operand" "Upa")) (match_operand:PRED_ALL 3 "register_operand" "Upa")) (match_operand:PRED_ALL 1 "register_operand" "Upa")))] "TARGET_SVE" "\t%0.b, %1/z, %3.b, %2.b" ) ;; ------------------------------------------------------------------------- ;; ---- [PRED] Binary logical operations (inverted result) ;; ------------------------------------------------------------------------- ;; Includes: ;; - NAND ;; - NOR ;; ------------------------------------------------------------------------- ;; Predicated predicate NAND and NOR. (define_insn "*3" [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (and:PRED_ALL (NLOGICAL:PRED_ALL (not:PRED_ALL (match_operand:PRED_ALL 2 "register_operand" "Upa")) (not:PRED_ALL (match_operand:PRED_ALL 3 "register_operand" "Upa"))) (match_operand:PRED_ALL 1 "register_operand" "Upa")))] "TARGET_SVE" "\t%0.b, %1/z, %2.b, %3.b" ) ;; ========================================================================= ;; == Ternary arithmetic ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT] MLA and MAD ;; ------------------------------------------------------------------------- ;; Includes: ;; - MAD ;; - MLA ;; ------------------------------------------------------------------------- ;; Predicated integer addition of product. (define_insn "*madd" [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (plus:SVE_I (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w, w") (match_operand:SVE_I 3 "register_operand" "w, w, w"))] UNSPEC_MERGE_PTRUE) (match_operand:SVE_I 4 "register_operand" "w, 0, w")))] "TARGET_SVE" "@ mad\t%0., %1/m, %3., %4. mla\t%0., %1/m, %2., %3. movprfx\t%0, %4\;mla\t%0., %1/m, %2., %3." [(set_attr "movprfx" "*,*,yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [INT] MLS and MSB ;; ------------------------------------------------------------------------- ;; Includes: ;; - MLS ;; - MSB ;; ------------------------------------------------------------------------- ;; Predicated integer subtraction of product. 
(define_insn "*msub3" [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (minus:SVE_I (match_operand:SVE_I 4 "register_operand" "w, 0, w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w, w") (match_operand:SVE_I 3 "register_operand" "w, w, w"))] UNSPEC_MERGE_PTRUE)))] "TARGET_SVE" "@ msb\t%0., %1/m, %3., %4. mls\t%0., %1/m, %2., %3. movprfx\t%0, %4\;mls\t%0., %1/m, %2., %3." [(set_attr "movprfx" "*,*,yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Dot product ;; ------------------------------------------------------------------------- ;; Includes: ;; - SDOT ;; - UDOT ;; ------------------------------------------------------------------------- ;; Four-element integer dot-product with accumulation. (define_insn "dot_prod" [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w") (plus:SVE_SDI (unspec:SVE_SDI [(match_operand: 1 "register_operand" "w, w") (match_operand: 2 "register_operand" "w, w")] DOTPROD) (match_operand:SVE_SDI 3 "register_operand" "0, w")))] "TARGET_SVE" "@ dot\\t%0., %1., %2. movprfx\t%0, %3\;dot\\t%0., %1., %2." [(set_attr "movprfx" "*,yes")] ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Sum of absolute differences ;; ------------------------------------------------------------------------- ;; The patterns in this section are synthetic. ;; ------------------------------------------------------------------------- ;; Emit a sequence to produce a sum-of-absolute-differences of the inputs in ;; operands 1 and 2. The sequence also has to perform a widening reduction of ;; the difference into a vector and accumulate that into operand 3 before ;; copying that into the result operand 0. ;; Perform that with a sequence of: ;; MOV ones.b, #1 ;; [SU]ABD diff.b, p0/m, op1.b, op2.b ;; MOVPRFX op0, op3 // If necessary ;; UDOT op0.s, diff.b, ones.b (define_expand "sad" [(use (match_operand:SVE_SDI 0 "register_operand")) (unspec: [(use (match_operand: 1 "register_operand")) (use (match_operand: 2 "register_operand"))] ABAL) (use (match_operand:SVE_SDI 3 "register_operand"))] "TARGET_SVE" { rtx ones = force_reg (mode, CONST1_RTX (mode)); rtx diff = gen_reg_rtx (mode); emit_insn (gen_abd_3 (diff, operands[1], operands[2])); emit_insn (gen_udot_prod (operands[0], diff, ones, operands[3])); DONE; } ) ;; ------------------------------------------------------------------------- ;; ---- [FP] General ternary arithmetic corresponding to unspecs ;; ------------------------------------------------------------------------- ;; Includes merging patterns for: ;; - FMAD ;; - FMLA ;; - FMLS ;; - FMSB ;; - FNMAD ;; - FNMLA ;; - FNMLS ;; - FNMSB ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point ternary operations. (define_expand "4" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 4) (match_operand:SVE_F 1 "register_operand") (match_operand:SVE_F 2 "register_operand") (match_operand:SVE_F 3 "register_operand")] SVE_COND_FP_TERNARY))] "TARGET_SVE" { operands[4] = aarch64_ptrue_reg (mode); } ) ;; Predicated floating-point ternary operations. 
(define_insn "*4" [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (match_operand:SVE_F 2 "register_operand" "%w, 0, w") (match_operand:SVE_F 3 "register_operand" "w, w, w") (match_operand:SVE_F 4 "register_operand" "0, w, w")] SVE_COND_FP_TERNARY))] "TARGET_SVE" "@ \t%0., %1/m, %2., %3. \t%0., %1/m, %3., %4. movprfx\t%0, %4\;\t%0., %1/m, %2., %3." [(set_attr "movprfx" "*,*,yes")] ) ;; Predicated floating-point ternary operations with merging. (define_expand "cond_" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_operand: 1 "register_operand") (unspec:SVE_F [(match_dup 1) (match_operand:SVE_F 2 "register_operand") (match_operand:SVE_F 3 "register_operand") (match_operand:SVE_F 4 "register_operand")] SVE_COND_FP_TERNARY) (match_operand:SVE_F 5 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" { /* Swap the multiplication operands if the fallback value is the second of the two. */ if (rtx_equal_p (operands[3], operands[5])) std::swap (operands[2], operands[3]); }) ;; Predicated floating-point ternary operations, merging with the ;; first input. (define_insn "*cond__2" [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl") (unspec:SVE_F [(match_dup 1) (match_operand:SVE_F 2 "register_operand" "0, w") (match_operand:SVE_F 3 "register_operand" "w, w") (match_operand:SVE_F 4 "register_operand" "w, w")] SVE_COND_FP_TERNARY) (match_dup 2)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %3., %4. movprfx\t%0, %2\;\t%0., %1/m, %3., %4." [(set_attr "movprfx" "*,yes")] ) ;; Predicated floating-point ternary operations, merging with the ;; third input. (define_insn "*cond__4" [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl") (unspec:SVE_F [(match_dup 1) (match_operand:SVE_F 2 "register_operand" "w, w") (match_operand:SVE_F 3 "register_operand" "w, w") (match_operand:SVE_F 4 "register_operand" "0, w")] SVE_COND_FP_TERNARY) (match_dup 4)] UNSPEC_SEL))] "TARGET_SVE" "@ \t%0., %1/m, %2., %3. movprfx\t%0, %4\;\t%0., %1/m, %2., %3." [(set_attr "movprfx" "*,yes")] ) ;; Predicated floating-point ternary operations, merging with an ;; independent value. (define_insn_and_rewrite "*cond__any" [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, ?&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (unspec:SVE_F [(match_dup 1) (match_operand:SVE_F 2 "register_operand" "w, w, w") (match_operand:SVE_F 3 "register_operand" "w, w, w") (match_operand:SVE_F 4 "register_operand" "w, w, w")] SVE_COND_FP_TERNARY) (match_operand:SVE_F 5 "aarch64_simd_reg_or_zero" "Dz, 0, w")] UNSPEC_SEL))] "TARGET_SVE && !rtx_equal_p (operands[2], operands[5]) && !rtx_equal_p (operands[3], operands[5]) && !rtx_equal_p (operands[4], operands[5])" "@ movprfx\t%0., %1/z, %4.\;\t%0., %1/m, %2., %3. movprfx\t%0., %1/m, %4.\;\t%0., %1/m, %2., %3. 
#" "&& reload_completed && !CONSTANT_P (operands[5]) && !rtx_equal_p (operands[0], operands[5])" { emit_insn (gen_vcond_mask_ (operands[0], operands[4], operands[5], operands[1])); operands[5] = operands[4] = operands[0]; } [(set_attr "movprfx" "yes")] ) ;; ========================================================================= ;; == Comparisons and selects ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Select based on predicates ;; ------------------------------------------------------------------------- ;; Includes merging patterns for: ;; - MOV ;; - SEL ;; ------------------------------------------------------------------------- ;; vcond_mask operand order: true, false, mask ;; UNSPEC_SEL operand order: mask, true, false (as for VEC_COND_EXPR) ;; SEL operand order: mask, true, false (define_insn "vcond_mask_" [(set (match_operand:SVE_ALL 0 "register_operand" "=w") (unspec:SVE_ALL [(match_operand: 3 "register_operand" "Upa") (match_operand:SVE_ALL 1 "register_operand" "w") (match_operand:SVE_ALL 2 "register_operand" "w")] UNSPEC_SEL))] "TARGET_SVE" "sel\t%0., %3, %1., %2." ) ;; Selects between a duplicated immediate and zero. (define_insn "aarch64_sve_dup_const" [(set (match_operand:SVE_I 0 "register_operand" "=w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl") (match_operand:SVE_I 2 "aarch64_sve_dup_immediate") (match_operand:SVE_I 3 "aarch64_simd_imm_zero")] UNSPEC_SEL))] "TARGET_SVE" "mov\t%0., %1/z, #%2" ) ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Compare and select ;; ------------------------------------------------------------------------- ;; The patterns in this section are synthetic. ;; ------------------------------------------------------------------------- ;; Integer (signed) vcond. Don't enforce an immediate range here, since it ;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead. (define_expand "vcond" [(set (match_operand:SVE_ALL 0 "register_operand") (if_then_else:SVE_ALL (match_operator 3 "comparison_operator" [(match_operand: 4 "register_operand") (match_operand: 5 "nonmemory_operand")]) (match_operand:SVE_ALL 1 "register_operand") (match_operand:SVE_ALL 2 "register_operand")))] "TARGET_SVE" { aarch64_expand_sve_vcond (mode, mode, operands); DONE; } ) ;; Integer vcondu. Don't enforce an immediate range here, since it ;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead. (define_expand "vcondu" [(set (match_operand:SVE_ALL 0 "register_operand") (if_then_else:SVE_ALL (match_operator 3 "comparison_operator" [(match_operand: 4 "register_operand") (match_operand: 5 "nonmemory_operand")]) (match_operand:SVE_ALL 1 "register_operand") (match_operand:SVE_ALL 2 "register_operand")))] "TARGET_SVE" { aarch64_expand_sve_vcond (mode, mode, operands); DONE; } ) ;; Floating-point vcond. All comparisons except FCMUO allow a zero operand; ;; aarch64_expand_sve_vcond handles the case of an FCMUO with zero. 
(define_expand "vcond" [(set (match_operand:SVE_SD 0 "register_operand") (if_then_else:SVE_SD (match_operator 3 "comparison_operator" [(match_operand: 4 "register_operand") (match_operand: 5 "aarch64_simd_reg_or_zero")]) (match_operand:SVE_SD 1 "register_operand") (match_operand:SVE_SD 2 "register_operand")))] "TARGET_SVE" { aarch64_expand_sve_vcond (mode, mode, operands); DONE; } ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Comparisons ;; ------------------------------------------------------------------------- ;; Includes merging patterns for: ;; - CMPEQ ;; - CMPGE ;; - CMPGT ;; - CMPHI ;; - CMPHS ;; - CMPLE ;; - CMPLO ;; - CMPLS ;; - CMPLT ;; - CMPNE ;; ------------------------------------------------------------------------- ;; Signed integer comparisons. Don't enforce an immediate range here, since ;; it depends on the comparison; leave it to aarch64_expand_sve_vec_cmp_int ;; instead. (define_expand "vec_cmp" [(parallel [(set (match_operand: 0 "register_operand") (match_operator: 1 "comparison_operator" [(match_operand:SVE_I 2 "register_operand") (match_operand:SVE_I 3 "nonmemory_operand")])) (clobber (reg:CC_NZC CC_REGNUM))])] "TARGET_SVE" { aarch64_expand_sve_vec_cmp_int (operands[0], GET_CODE (operands[1]), operands[2], operands[3]); DONE; } ) ;; Unsigned integer comparisons. Don't enforce an immediate range here, since ;; it depends on the comparison; leave it to aarch64_expand_sve_vec_cmp_int ;; instead. (define_expand "vec_cmpu" [(parallel [(set (match_operand: 0 "register_operand") (match_operator: 1 "comparison_operator" [(match_operand:SVE_I 2 "register_operand") (match_operand:SVE_I 3 "nonmemory_operand")])) (clobber (reg:CC_NZC CC_REGNUM))])] "TARGET_SVE" { aarch64_expand_sve_vec_cmp_int (operands[0], GET_CODE (operands[1]), operands[2], operands[3]); DONE; } ) ;; Integer comparisons predicated with a PTRUE. (define_insn "*cmp" [(set (match_operand: 0 "register_operand" "=Upa, Upa") (unspec: [(match_operand: 1 "register_operand" "Upl, Upl") (SVE_INT_CMP: (match_operand:SVE_I 2 "register_operand" "w, w") (match_operand:SVE_I 3 "aarch64_sve_cmp__operand" ", w"))] UNSPEC_MERGE_PTRUE)) (clobber (reg:CC_NZC CC_REGNUM))] "TARGET_SVE" "@ cmp\t%0., %1/z, %2., #%3 cmp\t%0., %1/z, %2., %3." ) ;; Integer comparisons predicated with a PTRUE in which both the flag and ;; predicate results are interesting. (define_insn "*cmp_cc" [(set (reg:CC_NZC CC_REGNUM) (unspec:CC_NZC [(match_operand: 1 "register_operand" "Upl, Upl") (unspec: [(match_dup 1) (SVE_INT_CMP: (match_operand:SVE_I 2 "register_operand" "w, w") (match_operand:SVE_I 3 "aarch64_sve_cmp__operand" ", w"))] UNSPEC_MERGE_PTRUE)] UNSPEC_PTEST_PTRUE)) (set (match_operand: 0 "register_operand" "=Upa, Upa") (unspec: [(match_dup 1) (SVE_INT_CMP: (match_dup 2) (match_dup 3))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ cmp\t%0., %1/z, %2., #%3 cmp\t%0., %1/z, %2., %3." ) ;; Integer comparisons predicated with a PTRUE in which only the flags result ;; is interesting. (define_insn "*cmp_ptest" [(set (reg:CC_NZC CC_REGNUM) (unspec:CC_NZC [(match_operand: 1 "register_operand" "Upl, Upl") (unspec: [(match_dup 1) (SVE_INT_CMP: (match_operand:SVE_I 2 "register_operand" "w, w") (match_operand:SVE_I 3 "aarch64_sve_cmp__operand" ", w"))] UNSPEC_MERGE_PTRUE)] UNSPEC_PTEST_PTRUE)) (clobber (match_scratch: 0 "=Upa, Upa"))] "TARGET_SVE" "@ cmp\t%0., %1/z, %2., #%3 cmp\t%0., %1/z, %2., %3." ) ;; Predicated integer comparisons, formed by combining a PTRUE-predicated ;; comparison with an AND. 
Split the instruction into its preferred form ;; (below) at the earliest opportunity, in order to get rid of the ;; redundant operand 1. (define_insn_and_split "*pred_cmp_combine" [(set (match_operand: 0 "register_operand" "=Upa, Upa") (and: (unspec: [(match_operand: 1) (SVE_INT_CMP: (match_operand:SVE_I 2 "register_operand" "w, w") (match_operand:SVE_I 3 "aarch64_sve_cmp__operand" ", w"))] UNSPEC_MERGE_PTRUE) (match_operand: 4 "register_operand" "Upl, Upl"))) (clobber (reg:CC_NZC CC_REGNUM))] "TARGET_SVE" "#" "&& 1" [(parallel [(set (match_dup 0) (and: (SVE_INT_CMP: (match_dup 2) (match_dup 3)) (match_dup 4))) (clobber (reg:CC_NZC CC_REGNUM))])] ) ;; Predicated integer comparisons. (define_insn "*pred_cmp" [(set (match_operand: 0 "register_operand" "=Upa, Upa") (and: (SVE_INT_CMP: (match_operand:SVE_I 2 "register_operand" "w, w") (match_operand:SVE_I 3 "aarch64_sve_cmp__operand" ", w")) (match_operand: 1 "register_operand" "Upl, Upl"))) (clobber (reg:CC_NZC CC_REGNUM))] "TARGET_SVE" "@ cmp\t%0., %1/z, %2., #%3 cmp\t%0., %1/z, %2., %3." ) ;; ------------------------------------------------------------------------- ;; ---- [INT] While tests ;; ------------------------------------------------------------------------- ;; Includes: ;; - WHILELO ;; ------------------------------------------------------------------------- ;; Set element I of the result if operand1 + J < operand2 for all J in [0, I], ;; with the comparison being unsigned. (define_insn "while_ult" [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (unspec:PRED_ALL [(match_operand:GPI 1 "aarch64_reg_or_zero" "rZ") (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")] UNSPEC_WHILE_LO)) (clobber (reg:CC_NZC CC_REGNUM))] "TARGET_SVE" "whilelo\t%0., %1, %2" ) ;; WHILELO sets the flags in the same way as a PTEST with a PTRUE GP. ;; Handle the case in which both results are useful. The GP operand ;; to the PTEST isn't needed, so we allow it to be anything. (define_insn_and_rewrite "*while_ult_cc" [(set (reg:CC_NZC CC_REGNUM) (unspec:CC_NZC [(match_operand:PRED_ALL 1) (unspec:PRED_ALL [(match_operand:GPI 2 "aarch64_reg_or_zero" "rZ") (match_operand:GPI 3 "aarch64_reg_or_zero" "rZ")] UNSPEC_WHILE_LO)] UNSPEC_PTEST_PTRUE)) (set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (unspec:PRED_ALL [(match_dup 2) (match_dup 3)] UNSPEC_WHILE_LO))] "TARGET_SVE" "whilelo\t%0., %2, %3" ;; Force the compiler to drop the unused predicate operand, so that we ;; don't have an unnecessary PTRUE. "&& !CONSTANT_P (operands[1])" { operands[1] = CONSTM1_RTX (mode); } ) ;; ------------------------------------------------------------------------- ;; ---- [FP] Comparisons ;; ------------------------------------------------------------------------- ;; Includes: ;; - FCMEQ ;; - FCMGE ;; - FCMGT ;; - FCMLE ;; - FCMLT ;; - FCMNE ;; - FCMUO ;; ------------------------------------------------------------------------- ;; Floating-point comparisons. All comparisons except FCMUO allow a zero ;; operand; aarch64_expand_sve_vec_cmp_float handles the case of an FCMUO ;; with zero. (define_expand "vec_cmp" [(set (match_operand: 0 "register_operand") (match_operator: 1 "comparison_operator" [(match_operand:SVE_F 2 "register_operand") (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero")]))] "TARGET_SVE" { aarch64_expand_sve_vec_cmp_float (operands[0], GET_CODE (operands[1]), operands[2], operands[3], false); DONE; } ) ;; Floating-point comparisons predicated with a PTRUE. 
(define_insn "*fcm" [(set (match_operand: 0 "register_operand" "=Upa, Upa") (unspec: [(match_operand: 1 "register_operand" "Upl, Upl") (SVE_FP_CMP: (match_operand:SVE_F 2 "register_operand" "w, w") (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "Dz, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fcm\t%0., %1/z, %2., #0.0 fcm\t%0., %1/z, %2., %3." ) ;; Same for unordered comparisons. (define_insn "*fcmuo" [(set (match_operand: 0 "register_operand" "=Upa") (unspec: [(match_operand: 1 "register_operand" "Upl") (unordered: (match_operand:SVE_F 2 "register_operand" "w") (match_operand:SVE_F 3 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "fcmuo\t%0., %1/z, %2., %3." ) ;; Floating-point comparisons predicated on a PTRUE, with the results ANDed ;; with another predicate P. This does not have the same trapping behavior ;; as predicating the comparison itself on P, but it's a legitimate fold, ;; since we can drop any potentially-trapping operations whose results ;; are not needed. ;; ;; Split the instruction into its preferred form (below) at the earliest ;; opportunity, in order to get rid of the redundant operand 1. (define_insn_and_split "*fcm_and_combine" [(set (match_operand: 0 "register_operand" "=Upa, Upa") (and: (unspec: [(match_operand: 1) (SVE_FP_CMP (match_operand:SVE_F 2 "register_operand" "w, w") (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "Dz, w"))] UNSPEC_MERGE_PTRUE) (match_operand: 4 "register_operand" "Upl, Upl")))] "TARGET_SVE" "#" "&& 1" [(set (match_dup 0) (and: (SVE_FP_CMP: (match_dup 2) (match_dup 3)) (match_dup 4)))] ) ;; Same for unordered comparisons. (define_insn_and_split "*fcmuo_and_combine" [(set (match_operand: 0 "register_operand" "=Upa") (and: (unspec: [(match_operand: 1) (unordered (match_operand:SVE_F 2 "register_operand" "w") (match_operand:SVE_F 3 "register_operand" "w"))] UNSPEC_MERGE_PTRUE) (match_operand: 4 "register_operand" "Upl")))] "TARGET_SVE" "#" "&& 1" [(set (match_dup 0) (and: (unordered: (match_dup 2) (match_dup 3)) (match_dup 4)))] ) ;; Unpredicated floating-point comparisons, with the results ANDed with ;; another predicate. This is a valid fold for the same reasons as above. (define_insn "*fcm_and" [(set (match_operand: 0 "register_operand" "=Upa, Upa") (and: (SVE_FP_CMP: (match_operand:SVE_F 2 "register_operand" "w, w") (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "Dz, w")) (match_operand: 1 "register_operand" "Upl, Upl")))] "TARGET_SVE" "@ fcm\t%0., %1/z, %2., #0.0 fcm\t%0., %1/z, %2., %3." ) ;; Same for unordered comparisons. (define_insn "*fcmuo_and" [(set (match_operand: 0 "register_operand" "=Upa") (and: (unordered: (match_operand:SVE_F 2 "register_operand" "w") (match_operand:SVE_F 3 "register_operand" "w")) (match_operand: 1 "register_operand" "Upl")))] "TARGET_SVE" "fcmuo\t%0., %1/z, %2., %3." ) ;; Predicated floating-point comparisons. We don't need a version ;; of this for unordered comparisons. (define_insn "*pred_fcm" [(set (match_operand: 0 "register_operand" "=Upa, Upa") (unspec: [(match_operand: 1 "register_operand" "Upl, Upl") (match_operand:SVE_F 2 "register_operand" "w, w") (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "Dz, w")] SVE_COND_FP_CMP))] "TARGET_SVE" "@ fcm\t%0., %1/z, %2., #0.0 fcm\t%0., %1/z, %2., %3." 
) ;; ------------------------------------------------------------------------- ;; ---- [PRED] Test bits ;; ------------------------------------------------------------------------- ;; Includes: ;; - PTEST ;; ------------------------------------------------------------------------- ;; Branch based on predicate equality or inequality. (define_expand "cbranch4" [(set (pc) (if_then_else (match_operator 0 "aarch64_equality_operator" [(match_operand:PRED_ALL 1 "register_operand") (match_operand:PRED_ALL 2 "aarch64_simd_reg_or_zero")]) (label_ref (match_operand 3 "")) (pc)))] "" { rtx ptrue = aarch64_ptrue_reg (mode); rtx pred; if (operands[2] == CONST0_RTX (mode)) pred = operands[1]; else { pred = gen_reg_rtx (mode); emit_insn (gen_pred_xor3 (pred, ptrue, operands[1], operands[2])); } emit_insn (gen_ptest_ptrue (ptrue, pred)); operands[1] = gen_rtx_REG (CC_NZCmode, CC_REGNUM); operands[2] = const0_rtx; } ) ;; Test all bits of operand 1. Operand 0 is a GP that is known to hold PTRUE. ;; ;; Using UNSPEC_PTEST_PTRUE allows combine patterns to assume that the GP ;; is a PTRUE even if the optimizers haven't yet been able to propagate ;; the constant. We would use a separate unspec code for PTESTs involving ;; GPs that might not be PTRUEs. (define_insn "ptest_ptrue" [(set (reg:CC_NZC CC_REGNUM) (unspec:CC_NZC [(match_operand:PRED_ALL 0 "register_operand" "Upa") (match_operand:PRED_ALL 1 "register_operand" "Upa")] UNSPEC_PTEST_PTRUE))] "TARGET_SVE" "ptest\t%0, %1.b" ) ;; ========================================================================= ;; == Reductions ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Conditional reductions ;; ------------------------------------------------------------------------- ;; Includes: ;; - CLASTB ;; ------------------------------------------------------------------------- ;; Set operand 0 to the last active element in operand 3, or to tied ;; operand 1 if no elements are active. (define_insn "fold_extract_last_" [(set (match_operand: 0 "register_operand" "=?r, w") (unspec: [(match_operand: 1 "register_operand" "0, 0") (match_operand: 2 "register_operand" "Upl, Upl") (match_operand:SVE_ALL 3 "register_operand" "w, w")] UNSPEC_CLASTB))] "TARGET_SVE" "@ clastb\t%0, %2, %0, %3. clastb\t%0, %2, %0, %3." ) ;; ------------------------------------------------------------------------- ;; ---- [INT] Tree reductions ;; ------------------------------------------------------------------------- ;; Includes: ;; - ANDV ;; - EORV ;; - ORV ;; - SMAXV ;; - SMINV ;; - UADDV ;; - UMAXV ;; - UMINV ;; ------------------------------------------------------------------------- ;; Unpredicated integer add reduction. (define_expand "reduc_plus_scal_" [(set (match_operand: 0 "register_operand") (unspec: [(match_dup 2) (match_operand:SVE_I 1 "register_operand")] UNSPEC_ADDV))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Predicated integer add reduction. The result is always 64-bits. (define_insn "*reduc_plus_scal_" [(set (match_operand: 0 "register_operand" "=w") (unspec: [(match_operand: 1 "register_operand" "Upl") (match_operand:SVE_I 2 "register_operand" "w")] UNSPEC_ADDV))] "TARGET_SVE" "uaddv\t%d0, %1, %2." ) ;; Unpredicated integer reductions. 
(define_expand "reduc__scal_" [(set (match_operand: 0 "register_operand") (unspec: [(match_dup 2) (match_operand:SVE_I 1 "register_operand")] SVE_INT_REDUCTION))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Predicated integer reductions. (define_insn "*reduc__scal_" [(set (match_operand: 0 "register_operand" "=w") (unspec: [(match_operand: 1 "register_operand" "Upl") (match_operand:SVE_I 2 "register_operand" "w")] SVE_INT_REDUCTION))] "TARGET_SVE" "\t%0, %1, %2." ) ;; ------------------------------------------------------------------------- ;; ---- [FP] Tree reductions ;; ------------------------------------------------------------------------- ;; Includes: ;; - FADDV ;; - FMAXNMV ;; - FMAXV ;; - FMINNMV ;; - FMINV ;; ------------------------------------------------------------------------- ;; Unpredicated floating-point tree reductions. (define_expand "reduc__scal_" [(set (match_operand: 0 "register_operand") (unspec: [(match_dup 2) (match_operand:SVE_F 1 "register_operand")] SVE_FP_REDUCTION))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Predicated floating-point tree reductions. (define_insn "*reduc__scal_" [(set (match_operand: 0 "register_operand" "=w") (unspec: [(match_operand: 1 "register_operand" "Upl") (match_operand:SVE_F 2 "register_operand" "w")] SVE_FP_REDUCTION))] "TARGET_SVE" "\t%0, %1, %2." ) ;; ------------------------------------------------------------------------- ;; ---- [FP] Left-to-right reductions ;; ------------------------------------------------------------------------- ;; Includes: ;; - FADDA ;; ------------------------------------------------------------------------- ;; Unpredicated in-order FP reductions. (define_expand "fold_left_plus_" [(set (match_operand: 0 "register_operand") (unspec: [(match_dup 3) (match_operand: 1 "register_operand") (match_operand:SVE_F 2 "register_operand")] UNSPEC_FADDA))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); } ) ;; Predicated in-order FP reductions. (define_insn "mask_fold_left_plus_" [(set (match_operand: 0 "register_operand" "=w") (unspec: [(match_operand: 3 "register_operand" "Upl") (match_operand: 1 "register_operand" "0") (match_operand:SVE_F 2 "register_operand" "w")] UNSPEC_FADDA))] "TARGET_SVE" "fadda\t%0, %3, %0, %2." ) ;; ========================================================================= ;; == Permutes ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] General permutes ;; ------------------------------------------------------------------------- ;; Includes: ;; - TBL ;; ------------------------------------------------------------------------- (define_expand "vec_perm" [(match_operand:SVE_ALL 0 "register_operand") (match_operand:SVE_ALL 1 "register_operand") (match_operand:SVE_ALL 2 "register_operand") (match_operand: 3 "aarch64_sve_vec_perm_operand")] "TARGET_SVE && GET_MODE_NUNITS (mode).is_constant ()" { aarch64_expand_sve_vec_perm (operands[0], operands[1], operands[2], operands[3]); DONE; } ) (define_insn "*aarch64_sve_tbl" [(set (match_operand:SVE_ALL 0 "register_operand" "=w") (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w") (match_operand: 2 "register_operand" "w")] UNSPEC_TBL))] "TARGET_SVE" "tbl\t%0., %1., %2." 
) ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Special-purpose unary permutes ;; ------------------------------------------------------------------------- ;; Includes: ;; - DUP ;; - REV ;; - REVB ;; - REVH ;; - REVW ;; ------------------------------------------------------------------------- ;; Duplicate one element of a vector. (define_insn "*aarch64_sve_dup_lane" [(set (match_operand:SVE_ALL 0 "register_operand" "=w") (vec_duplicate:SVE_ALL (vec_select: (match_operand:SVE_ALL 1 "register_operand" "w") (parallel [(match_operand:SI 2 "const_int_operand")]))))] "TARGET_SVE && IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (mode), 0, 63)" "dup\t%0., %1.[%2]" ) ;; Reverse the order of elements within a full vector. (define_insn "@aarch64_sve_rev" [(set (match_operand:SVE_ALL 0 "register_operand" "=w") (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w")] UNSPEC_REV))] "TARGET_SVE" "rev\t%0., %1.") ;; Reverse the order elements within a 64-bit container. (define_insn "*aarch64_sve_rev64" [(set (match_operand:SVE_BHS 0 "register_operand" "=w") (unspec:SVE_BHS [(match_operand:VNx2BI 1 "register_operand" "Upl") (unspec:SVE_BHS [(match_operand:SVE_BHS 2 "register_operand" "w")] UNSPEC_REV64)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "rev\t%0.d, %1/m, %2.d" ) ;; Reverse the order elements within a 32-bit container. (define_insn "*aarch64_sve_rev32" [(set (match_operand:SVE_BH 0 "register_operand" "=w") (unspec:SVE_BH [(match_operand:VNx4BI 1 "register_operand" "Upl") (unspec:SVE_BH [(match_operand:SVE_BH 2 "register_operand" "w")] UNSPEC_REV32)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "rev\t%0.s, %1/m, %2.s" ) ;; Reverse the order elements within a 16-bit container. (define_insn "*aarch64_sve_rev16vnx16qi" [(set (match_operand:VNx16QI 0 "register_operand" "=w") (unspec:VNx16QI [(match_operand:VNx8BI 1 "register_operand" "Upl") (unspec:VNx16QI [(match_operand:VNx16QI 2 "register_operand" "w")] UNSPEC_REV16)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "revb\t%0.h, %1/m, %2.h" ) ;; ------------------------------------------------------------------------- ;; ---- [INT,FP] Special-purpose binary permutes ;; ------------------------------------------------------------------------- ;; Includes: ;; - TRN1 ;; - TRN2 ;; - UZP1 ;; - UZP2 ;; - ZIP1 ;; - ZIP2 ;; ------------------------------------------------------------------------- ;; Permutes that take half the elements from one vector and half the ;; elements from the other. (define_insn "aarch64_sve_" [(set (match_operand:SVE_ALL 0 "register_operand" "=w") (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w") (match_operand:SVE_ALL 2 "register_operand" "w")] PERMUTE))] "TARGET_SVE" "\t%0., %1., %2." ) ;; Concatenate two vectors and extract a subvector. Note that the ;; immediate (third) operand is the lane index not the byte index. 
(define_insn "*aarch64_sve_ext" [(set (match_operand:SVE_ALL 0 "register_operand" "=w") (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "0") (match_operand:SVE_ALL 2 "register_operand" "w") (match_operand:SI 3 "const_int_operand")] UNSPEC_EXT))] "TARGET_SVE && IN_RANGE (INTVAL (operands[3]) * GET_MODE_SIZE (mode), 0, 255)" { operands[3] = GEN_INT (INTVAL (operands[3]) * GET_MODE_SIZE (mode)); return "ext\\t%0.b, %0.b, %2.b, #%3"; } ) ;; ------------------------------------------------------------------------- ;; ---- [PRED] Special-purpose binary permutes ;; ------------------------------------------------------------------------- ;; Includes: ;; - TRN1 ;; - TRN2 ;; - UZP1 ;; - UZP2 ;; - ZIP1 ;; - ZIP2 ;; ------------------------------------------------------------------------- ;; Permutes that take half the elements from one vector and half the ;; elements from the other. (define_insn "*aarch64_sve_" [(set (match_operand:PRED_ALL 0 "register_operand" "=Upa") (unspec:PRED_ALL [(match_operand:PRED_ALL 1 "register_operand" "Upa") (match_operand:PRED_ALL 2 "register_operand" "Upa")] PERMUTE))] "TARGET_SVE" "\t%0., %1., %2." ) ;; ========================================================================= ;; == Conversions ;; ========================================================================= ;; ------------------------------------------------------------------------- ;; ---- [INT<-INT] Packs ;; ------------------------------------------------------------------------- ;; Includes: ;; - UZP1 ;; ------------------------------------------------------------------------- ;; Integer pack. Use UZP1 on the narrower type, which discards ;; the high part of each wide element. (define_insn "vec_pack_trunc_" [(set (match_operand:SVE_BHSI 0 "register_operand" "=w") (unspec:SVE_BHSI [(match_operand: 1 "register_operand" "w") (match_operand: 2 "register_operand" "w")] UNSPEC_PACK))] "TARGET_SVE" "uzp1\t%0., %1., %2." ) ;; ------------------------------------------------------------------------- ;; ---- [INT<-INT] Unpacks ;; ------------------------------------------------------------------------- ;; Includes: ;; - SUNPKHI ;; - SUNPKLO ;; - UUNPKHI ;; - UUNPKLO ;; ------------------------------------------------------------------------- ;; Unpack the low or high half of a vector, where "high" refers to ;; the low-numbered lanes for big-endian and the high-numbered lanes ;; for little-endian. (define_expand "vec_unpack__" [(match_operand: 0 "register_operand") (unspec: [(match_operand:SVE_BHSI 1 "register_operand")] UNPACK)] "TARGET_SVE" { emit_insn (( ? gen_aarch64_sve_unpkhi_ : gen_aarch64_sve_unpklo_) (operands[0], operands[1])); DONE; } ) (define_insn "aarch64_sve_unpk_" [(set (match_operand: 0 "register_operand" "=w") (unspec: [(match_operand:SVE_BHSI 1 "register_operand" "w")] UNPACK))] "TARGET_SVE" "unpk\t%0., %1." ) ;; ------------------------------------------------------------------------- ;; ---- [INT<-FP] Conversions ;; ------------------------------------------------------------------------- ;; Includes: ;; - FCVTZS ;; - FCVTZU ;; ------------------------------------------------------------------------- ;; Unpredicated conversion of floats to integers of the same size (HF to HI, ;; SF to SI or DF to DI). 
(define_expand "2" [(set (match_operand: 0 "register_operand") (unspec: [(match_dup 2) (FIXUORS: (match_operand:SVE_F 1 "register_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Conversion of SF to DI, SI or HI, predicated with a PTRUE. (define_insn "*v16hsf2" [(set (match_operand:SVE_HSDI 0 "register_operand" "=w") (unspec:SVE_HSDI [(match_operand: 1 "register_operand" "Upl") (FIXUORS:SVE_HSDI (match_operand:VNx8HF 2 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "fcvtz\t%0., %1/m, %2.h" ) ;; Conversion of SF to DI or SI, predicated with a PTRUE. (define_insn "*vnx4sf2" [(set (match_operand:SVE_SDI 0 "register_operand" "=w") (unspec:SVE_SDI [(match_operand: 1 "register_operand" "Upl") (FIXUORS:SVE_SDI (match_operand:VNx4SF 2 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "fcvtz\t%0., %1/m, %2.s" ) ;; Conversion of DF to DI or SI, predicated with a PTRUE. (define_insn "*vnx2df2" [(set (match_operand:SVE_SDI 0 "register_operand" "=w") (unspec:SVE_SDI [(match_operand:VNx2BI 1 "register_operand" "Upl") (FIXUORS:SVE_SDI (match_operand:VNx2DF 2 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "fcvtz\t%0., %1/m, %2.d" ) ;; ------------------------------------------------------------------------- ;; ---- [INT<-FP] Packs ;; ------------------------------------------------------------------------- ;; The patterns in this section are synthetic. ;; ------------------------------------------------------------------------- ;; Convert two vectors of DF to SI and pack the results into a single vector. (define_expand "vec_pack_fix_trunc_vnx2df" [(set (match_dup 4) (unspec:VNx4SI [(match_dup 3) (FIXUORS:VNx4SI (match_operand:VNx2DF 1 "register_operand"))] UNSPEC_MERGE_PTRUE)) (set (match_dup 5) (unspec:VNx4SI [(match_dup 3) (FIXUORS:VNx4SI (match_operand:VNx2DF 2 "register_operand"))] UNSPEC_MERGE_PTRUE)) (set (match_operand:VNx4SI 0 "register_operand") (unspec:VNx4SI [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (VNx2BImode); operands[4] = gen_reg_rtx (VNx4SImode); operands[5] = gen_reg_rtx (VNx4SImode); } ) ;; ------------------------------------------------------------------------- ;; ---- [INT<-FP] Unpacks ;; ------------------------------------------------------------------------- ;; No patterns here yet! ;; ------------------------------------------------------------------------- ;; ------------------------------------------------------------------------- ;; ---- [FP<-INT] Conversions ;; ------------------------------------------------------------------------- ;; Includes: ;; - SCVTF ;; - UCVTF ;; ------------------------------------------------------------------------- ;; Unpredicated conversion of integers to floats of the same size ;; (HI to HF, SI to SF or DI to DF). (define_expand "2" [(set (match_operand:SVE_F 0 "register_operand") (unspec:SVE_F [(match_dup 2) (FLOATUORS:SVE_F (match_operand: 1 "register_operand"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" { operands[2] = aarch64_ptrue_reg (mode); } ) ;; Conversion of DI, SI or HI to the same number of HFs, predicated ;; with a PTRUE. (define_insn "*vnx8hf2" [(set (match_operand:VNx8HF 0 "register_operand" "=w") (unspec:VNx8HF [(match_operand: 1 "register_operand" "Upl") (FLOATUORS:VNx8HF (match_operand:SVE_HSDI 2 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "cvtf\t%0.h, %1/m, %2." ) ;; Conversion of DI or SI to the same number of SFs, predicated with a PTRUE. 
(define_insn "*vnx4sf2" [(set (match_operand:VNx4SF 0 "register_operand" "=w") (unspec:VNx4SF [(match_operand: 1 "register_operand" "Upl") (FLOATUORS:VNx4SF (match_operand:SVE_SDI 2 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "cvtf\t%0.s, %1/m, %2." ) ;; Conversion of DI or SI to DF, predicated with a PTRUE. (define_insn "aarch64_sve_vnx2df2" [(set (match_operand:VNx2DF 0 "register_operand" "=w") (unspec:VNx2DF [(match_operand:VNx2BI 1 "register_operand" "Upl") (FLOATUORS:VNx2DF (match_operand:SVE_SDI 2 "register_operand" "w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "cvtf\t%0.d, %1/m, %2." ) ;; ------------------------------------------------------------------------- ;; ---- [FP<-INT] Packs ;; ------------------------------------------------------------------------- ;; No patterns here yet! ;; ------------------------------------------------------------------------- ;; ------------------------------------------------------------------------- ;; ---- [FP<-INT] Unpacks ;; ------------------------------------------------------------------------- ;; The patterns in this section are synthetic. ;; ------------------------------------------------------------------------- ;; Unpack one half of a VNx4SI to VNx2DF. First unpack from VNx4SI ;; to VNx2DI, reinterpret the VNx2DI as a VNx4SI, then convert the ;; unpacked VNx4SI to VNx2DF. (define_expand "vec_unpack_float__vnx4si" [(match_operand:VNx2DF 0 "register_operand") (FLOATUORS:VNx2DF (unspec:VNx2DI [(match_operand:VNx4SI 1 "register_operand")] UNPACK_UNSIGNED))] "TARGET_SVE" { /* Use ZIP to do the unpack, since we don't care about the upper halves and since it has the nice property of not needing any subregs. If using UUNPK* turns out to be preferable, we could model it as a ZIP whose first operand is zero. */ rtx temp = gen_reg_rtx (VNx4SImode); emit_insn (( ? gen_aarch64_sve_zip2vnx4si : gen_aarch64_sve_zip1vnx4si) (temp, operands[1], operands[1])); rtx ptrue = aarch64_ptrue_reg (VNx2BImode); emit_insn (gen_aarch64_sve_vnx4sivnx2df2 (operands[0], ptrue, temp)); DONE; } ) ;; ------------------------------------------------------------------------- ;; ---- [FP<-FP] Packs ;; ------------------------------------------------------------------------- ;; Includes: ;; - FCVT ;; ------------------------------------------------------------------------- ;; Convert two vectors of DF to SF, or two vectors of SF to HF, and pack ;; the results into a single vector. (define_expand "vec_pack_trunc_" [(set (match_dup 4) (unspec:SVE_HSF [(match_dup 3) (unspec:SVE_HSF [(match_operand: 1 "register_operand")] UNSPEC_FLOAT_CONVERT)] UNSPEC_MERGE_PTRUE)) (set (match_dup 5) (unspec:SVE_HSF [(match_dup 3) (unspec:SVE_HSF [(match_operand: 2 "register_operand")] UNSPEC_FLOAT_CONVERT)] UNSPEC_MERGE_PTRUE)) (set (match_operand:SVE_HSF 0 "register_operand") (unspec:SVE_HSF [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))] "TARGET_SVE" { operands[3] = aarch64_ptrue_reg (mode); operands[4] = gen_reg_rtx (mode); operands[5] = gen_reg_rtx (mode); } ) ;; Conversion of DFs to the same number of SFs, or SFs to the same number ;; of HFs. (define_insn "*trunc2" [(set (match_operand:SVE_HSF 0 "register_operand" "=w") (unspec:SVE_HSF [(match_operand: 1 "register_operand" "Upl") (unspec:SVE_HSF [(match_operand: 2 "register_operand" "w")] UNSPEC_FLOAT_CONVERT)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "fcvt\t%0., %1/m, %2." 
) ;; ------------------------------------------------------------------------- ;; ---- [FP<-FP] Unpacks ;; ------------------------------------------------------------------------- ;; Includes: ;; - FCVT ;; ------------------------------------------------------------------------- ;; Unpack one half of a VNx4SF to VNx2DF, or one half of a VNx8HF to VNx4SF. ;; First unpack the source without conversion, then float-convert the ;; unpacked source. (define_expand "vec_unpacks__" [(match_operand: 0 "register_operand") (unspec:SVE_HSF [(match_operand:SVE_HSF 1 "register_operand")] UNPACK_UNSIGNED)] "TARGET_SVE" { /* Use ZIP to do the unpack, since we don't care about the upper halves and since it has the nice property of not needing any subregs. If using UUNPK* turns out to be preferable, we could model it as a ZIP whose first operand is zero. */ rtx temp = gen_reg_rtx (mode); emit_insn (( ? gen_aarch64_sve_zip2 : gen_aarch64_sve_zip1) (temp, operands[1], operands[1])); rtx ptrue = aarch64_ptrue_reg (mode); emit_insn (gen_aarch64_sve_extend2 (operands[0], ptrue, temp)); DONE; } ) ;; Conversion of SFs to the same number of DFs, or HFs to the same number ;; of SFs. (define_insn "aarch64_sve_extend2" [(set (match_operand: 0 "register_operand" "=w") (unspec: [(match_operand: 1 "register_operand" "Upl") (unspec: [(match_operand:SVE_HSF 2 "register_operand" "w")] UNSPEC_FLOAT_CONVERT)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "fcvt\t%0., %1/m, %2." ) ;; ------------------------------------------------------------------------- ;; ---- [PRED<-PRED] Packs ;; ------------------------------------------------------------------------- ;; Includes: ;; - UZP1 ;; ------------------------------------------------------------------------- ;; Predicate pack. Use UZP1 on the narrower type, which discards ;; the high part of each wide element. (define_insn "vec_pack_trunc_" [(set (match_operand:PRED_BHS 0 "register_operand" "=Upa") (unspec:PRED_BHS [(match_operand: 1 "register_operand" "Upa") (match_operand: 2 "register_operand" "Upa")] UNSPEC_PACK))] "TARGET_SVE" "uzp1\t%0., %1., %2." ) ;; ------------------------------------------------------------------------- ;; ---- [PRED<-PRED] Unpacks ;; ------------------------------------------------------------------------- ;; Includes: ;; - PUNPKHI ;; - PUNPKLO ;; ------------------------------------------------------------------------- ;; Unpack the low or high half of a predicate, where "high" refers to ;; the low-numbered lanes for big-endian and the high-numbered lanes ;; for little-endian. (define_expand "vec_unpack__" [(match_operand: 0 "register_operand") (unspec: [(match_operand:PRED_BHS 1 "register_operand")] UNPACK)] "TARGET_SVE" { emit_insn (( ? gen_aarch64_sve_punpkhi_ : gen_aarch64_sve_punpklo_) (operands[0], operands[1])); DONE; } ) (define_insn "aarch64_sve_punpk_" [(set (match_operand: 0 "register_operand" "=Upa") (unspec: [(match_operand:PRED_BHS 1 "register_operand" "Upa")] UNPACK_UNSIGNED))] "TARGET_SVE" "punpk\t%0.h, %1.b" )