;; Machine description for AArch64 architecture.
;; Copyright (C) 2009-2013 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
;; Register numbers
;; Symbolic names for the hard register numbers.  Note the aliases:
;; IP0 = R16, IP1 = R17 and LR = R30 share the same register numbers.
(define_constants
[
(R0_REGNUM 0)
(R1_REGNUM 1)
(R2_REGNUM 2)
(R3_REGNUM 3)
(R4_REGNUM 4)
(R5_REGNUM 5)
(R6_REGNUM 6)
(R7_REGNUM 7)
(R8_REGNUM 8)
(R9_REGNUM 9)
(R10_REGNUM 10)
(R11_REGNUM 11)
(R12_REGNUM 12)
(R13_REGNUM 13)
(R14_REGNUM 14)
(R15_REGNUM 15)
(R16_REGNUM 16)
(IP0_REGNUM 16) ; Alias of R16.
(R17_REGNUM 17)
(IP1_REGNUM 17) ; Alias of R17.
(R18_REGNUM 18)
(R19_REGNUM 19)
(R20_REGNUM 20)
(R21_REGNUM 21)
(R22_REGNUM 22)
(R23_REGNUM 23)
(R24_REGNUM 24)
(R25_REGNUM 25)
(R26_REGNUM 26)
(R27_REGNUM 27)
(R28_REGNUM 28)
(R29_REGNUM 29)
(R30_REGNUM 30)
(LR_REGNUM 30) ; Alias of R30.
(SP_REGNUM 31)
(V0_REGNUM 32)
(V15_REGNUM 47)
(V31_REGNUM 63)
;; NOTE(review): SFP/AP look like the soft frame pointer and argument
;; pointer pseudo registers — confirm against aarch64.h.
(SFP_REGNUM 64)
(AP_REGNUM 65)
(CC_REGNUM 66) ; Condition-code register (used as (reg:CC CC_REGNUM) below).
]
)
;; Unspec operations used in the patterns in this (and included) files.
(define_c_enum "unspec" [
UNSPEC_CASESI
UNSPEC_CLS
UNSPEC_FRECPE
UNSPEC_FRECPS
UNSPEC_FRECPX
UNSPEC_FRINTA
UNSPEC_FRINTI
UNSPEC_FRINTM
UNSPEC_FRINTN
UNSPEC_FRINTP
UNSPEC_FRINTX
UNSPEC_FRINTZ
UNSPEC_GOTSMALLPIC
UNSPEC_GOTSMALLTLS
UNSPEC_LD2
UNSPEC_LD3
UNSPEC_LD4
UNSPEC_MB
UNSPEC_NOP
UNSPEC_PRLG_STK
UNSPEC_RBIT
UNSPEC_ST2
UNSPEC_ST3
UNSPEC_ST4
UNSPEC_TLS
UNSPEC_TLSDESC
UNSPEC_VSTRUCTDUMMY
])
;; Volatile unspec operations (must not be CSEd or deleted).
(define_c_enum "unspecv" [
UNSPECV_EH_RETURN ; Represent EH_RETURN
]
)
;; If further include files are added the definition of MD_INCLUDES
;; must be updated.
(include "constraints.md")
(include "predicates.md")
(include "iterators.md")
;; -------------------------------------------------------------------
;; Instruction types and attributes
;; -------------------------------------------------------------------
;; Main data types used by the instructions
(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
(const_string "unknown"))
;; Secondary mode attribute for patterns that involve two modes.
(define_attr "mode2" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
(const_string "unknown"))
; The "v8type" attribute is used to for fine grained classification of
; AArch64 instructions. This table briefly explains the meaning of each type.
; adc add/subtract with carry.
; adcs add/subtract with carry (setting condition flags).
; adr calculate address.
; alu simple alu instruction (no memory or fp regs access).
; alu_ext simple alu instruction (sign/zero-extended register).
; alu_shift simple alu instruction, with a source operand shifted by a constant.
; alus simple alu instruction (setting condition flags).
; alus_ext simple alu instruction (sign/zero-extended register, setting condition flags).
; alus_shift simple alu instruction, with a source operand shifted by a constant (setting condition flags).
; bfm bitfield move operation.
; branch branch.
; call subroutine call.
; ccmp conditional compare.
; clz count leading zeros/sign bits.
; csel conditional select.
; dmb data memory barrier.
; extend sign/zero-extend (specialised bitfield move).
; extr extract register-sized bitfield encoding.
; fpsimd_load load single floating point / simd scalar register from memory.
; fpsimd_load2 load pair of floating point / simd scalar registers from memory.
; fpsimd_store store single floating point / simd scalar register to memory.
; fpsimd_store2 store pair floating point / simd scalar registers to memory.
; fadd floating point add/sub.
; fccmp floating point conditional compare.
; fcmp floating point comparison.
; fconst floating point load immediate.
; fcsel floating point conditional select.
; fcvt floating point convert (float to float).
; fcvtf2i floating point convert (float to integer).
; fcvti2f floating point convert (integer to float).
; fdiv floating point division operation.
; ffarith floating point abs, neg or cpy.
; fmadd floating point multiply-add/sub.
; fminmax floating point min/max.
; fmov floating point move (float to float).
; fmovf2i floating point move (float to integer).
; fmovi2f floating point move (integer to float).
; fmul floating point multiply.
; frint floating point round to integral.
; fsqrt floating point square root.
; load_acq load-acquire.
; load1 load single general register from memory
; load2 load pair of general registers from memory
; logic logical operation (register).
; logic_imm and/or/xor operation (immediate).
; logic_shift logical operation with shift.
; logics logical operation (register, setting condition flags).
; logics_imm and/or/xor operation (immediate, setting condition flags).
; logics_shift logical operation with shift (setting condition flags).
; madd integer multiply-add/sub.
; maddl widening integer multiply-add/sub.
; misc miscellaneous - any type that doesn't fit into the rest.
; move integer move operation.
; move2 double integer move operation.
; movk move 16-bit immediate with keep.
; movz move 16-bit immediate with zero/one.
; mrs system/special register move.
; mulh 64x64 to 128-bit multiply (high part).
; mull widening multiply.
; mult integer multiply instruction.
; prefetch memory prefetch.
; rbit reverse bits.
; rev reverse bytes.
; sdiv integer division operation (signed).
; shift variable shift operation.
; shift_imm immediate shift operation (specialised bitfield move).
; store_rel store-release.
; store1 store single general register to memory.
; store2 store pair of general registers to memory.
; udiv integer division operation (unsigned).
; NOTE(review): div, div64, fcsel, frecpe, frecps and frecpx appear in the
; enumeration below but are not described in this table — document them.
(define_attr "v8type"
"adc,\
adcs,\
adr,\
alu,\
alu_ext,\
alu_shift,\
alus,\
alus_ext,\
alus_shift,\
bfm,\
branch,\
call,\
ccmp,\
clz,\
csel,\
dmb,\
div,\
div64,\
extend,\
extr,\
fpsimd_load,\
fpsimd_load2,\
fpsimd_store2,\
fpsimd_store,\
fadd,\
fccmp,\
fcvt,\
fcvtf2i,\
fcvti2f,\
fcmp,\
fconst,\
fcsel,\
fdiv,\
ffarith,\
fmadd,\
fminmax,\
fmov,\
fmovf2i,\
fmovi2f,\
fmul,\
frecpe,\
frecps,\
frecpx,\
frint,\
fsqrt,\
load_acq,\
load1,\
load2,\
logic,\
logic_imm,\
logic_shift,\
logics,\
logics_imm,\
logics_shift,\
madd,\
maddl,\
misc,\
move,\
move2,\
movk,\
movz,\
mrs,\
mulh,\
mull,\
mult,\
prefetch,\
rbit,\
rev,\
sdiv,\
shift,\
shift_imm,\
store_rel,\
store1,\
store2,\
udiv"
(const_string "alu"))
; The "type" attribute is used by the AArch32 backend. Below is a mapping
; from "v8type" to "type".  v8types with no entry in the cond fall through
; to the default "alu".
(define_attr "type"
"alu,alu_shift,block,branch,call,f_2_r,f_cvt,f_flag,f_loads,
f_loadd,f_stored,f_stores,faddd,fadds,fcmpd,fcmps,fconstd,fconsts,
fcpys,fdivd,fdivs,ffarithd,ffariths,fmacd,fmacs,fmuld,fmuls,load_byte,
load1,load2,mult,r_2_f,store1,store2"
(cond [
(eq_attr "v8type" "alu_shift,alus_shift,logic_shift,logics_shift") (const_string "alu_shift")
(eq_attr "v8type" "branch") (const_string "branch")
(eq_attr "v8type" "call") (const_string "call")
(eq_attr "v8type" "fmovf2i") (const_string "f_2_r")
(eq_attr "v8type" "fcvt,fcvtf2i,fcvti2f") (const_string "f_cvt")
(and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "SF")) (const_string "f_loads")
(and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "DF")) (const_string "f_loadd")
(and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "SF")) (const_string "f_stores")
(and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "DF")) (const_string "f_stored")
(and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "DF")) (const_string "faddd")
(and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "SF")) (const_string "fadds")
(and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "DF")) (const_string "fcmpd")
(and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "SF")) (const_string "fcmps")
(and (eq_attr "v8type" "fconst") (eq_attr "mode" "DF")) (const_string "fconstd")
(and (eq_attr "v8type" "fconst") (eq_attr "mode" "SF")) (const_string "fconsts")
(and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")) (const_string "fdivd")
(and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")) (const_string "fdivs")
(and (eq_attr "v8type" "ffarith") (eq_attr "mode" "DF")) (const_string "ffarithd")
(and (eq_attr "v8type" "ffarith") (eq_attr "mode" "SF")) (const_string "ffariths")
(and (eq_attr "v8type" "fmadd") (eq_attr "mode" "DF")) (const_string "fmacd")
(and (eq_attr "v8type" "fmadd") (eq_attr "mode" "SF")) (const_string "fmacs")
(and (eq_attr "v8type" "fmul") (eq_attr "mode" "DF")) (const_string "fmuld")
(and (eq_attr "v8type" "fmul") (eq_attr "mode" "SF")) (const_string "fmuls")
(and (eq_attr "v8type" "load1") (eq_attr "mode" "QI,HI")) (const_string "load_byte")
(and (eq_attr "v8type" "load1") (eq_attr "mode" "SI,DI,TI")) (const_string "load1")
(eq_attr "v8type" "load2") (const_string "load2")
(and (eq_attr "v8type" "mulh,mult,mull,madd,sdiv,udiv") (eq_attr "mode" "SI")) (const_string "mult")
(eq_attr "v8type" "fmovi2f") (const_string "r_2_f")
(eq_attr "v8type" "store1") (const_string "store1")
(eq_attr "v8type" "store2") (const_string "store2")
]
(const_string "alu")))
;; Attribute that specifies whether or not the instruction touches fp
;; registers.
(define_attr "fp" "no,yes" (const_string "no"))
;; Attribute that specifies whether or not the instruction touches simd
;; registers.
(define_attr "simd" "no,yes" (const_string "no"))
;; Instruction length in bytes; the default is one 4-byte instruction.
(define_attr "length" ""
(const_int 4))
;; Attribute that controls whether an alternative is enabled or not.
;; Currently it is only used to disable alternatives which touch fp or simd
;; registers when -mgeneral-regs-only is specified.
(define_attr "enabled" "no,yes"
(cond [(ior
(and (eq_attr "fp" "yes")
(eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
(and (eq_attr "simd" "yes")
(eq (symbol_ref "TARGET_SIMD") (const_int 0))))
(const_string "no")
] (const_string "yes")))
;; -------------------------------------------------------------------
;; Pipeline descriptions and scheduling
;; -------------------------------------------------------------------
;; Processor types.
(include "aarch64-tune.md")
;; Scheduling
(include "aarch64-generic.md")
(include "large.md")
(include "small.md")
;; -------------------------------------------------------------------
;; Jumps and other miscellaneous insns
;; -------------------------------------------------------------------
;; NOTE(review): throughout this copy of the file the angle-bracket
;; iterator substitutions ("<mode>", "<optab>", etc.) appear to have been
;; stripped from pattern names and output templates (e.g. "cbranch4",
;; "*cb1", and templates beginning with "\\t").  Restore them from the
;; upstream sources before building.
;; Indirect jump to the address in a general register.
(define_insn "indirect_jump"
[(set (pc) (match_operand:DI 0 "register_operand" "r"))]
""
"br\\t%0"
[(set_attr "v8type" "branch")]
)
;; Unconditional branch to a label.
(define_insn "jump"
[(set (pc) (label_ref (match_operand 0 "" "")))]
""
"b\\t%l0"
[(set_attr "v8type" "branch")]
)
;; Compare-and-branch expander for the integer (GPI) modes: emit the
;; compare into the CC register, then branch on the CC result.
(define_expand "cbranch4"
[(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
[(match_operand:GPI 1 "register_operand" "")
(match_operand:GPI 2 "aarch64_plus_operand" "")])
(label_ref (match_operand 3 "" ""))
(pc)))]
""
"
operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
operands[2]);
operands[2] = const0_rtx;
"
)
;; As above, for the floating-point (GPF) modes.
(define_expand "cbranch4"
[(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
[(match_operand:GPF 1 "register_operand" "")
(match_operand:GPF 2 "aarch64_reg_or_zero" "")])
(label_ref (match_operand 3 "" ""))
(pc)))]
""
"
operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
operands[2]);
operands[2] = const0_rtx;
"
)
;; Conditional branch on a previously-set condition-code register.
(define_insn "*condjump"
[(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
[(match_operand 1 "cc_register" "") (const_int 0)])
(label_ref (match_operand 2 "" ""))
(pc)))]
""
"b%m0\\t%l2"
[(set_attr "v8type" "branch")]
)
;; Expand a switch-statement dispatch: bias the index so the lower bound
;; becomes zero, branch to the default label on an unsigned out-of-range
;; index, then jump through the dispatch table.
(define_expand "casesi"
[(match_operand:SI 0 "register_operand" "") ; Index
(match_operand:SI 1 "const_int_operand" "") ; Lower bound
(match_operand:SI 2 "const_int_operand" "") ; Total range
(match_operand:DI 3 "" "") ; Table label
(match_operand:DI 4 "" "")] ; Out of range label
""
{
if (operands[1] != const0_rtx)
{
rtx reg = gen_reg_rtx (SImode);
/* Canonical RTL says that if you have:
(minus (X) (CONST))
then this should be emitted as:
(plus (X) (-CONST))
The use of trunc_int_for_mode ensures that the resulting
constant can be represented in SImode, this is important
for the corner case where operand[1] is INT_MIN. */
operands[1] = GEN_INT (trunc_int_for_mode (-INTVAL (operands[1]), SImode));
if (!(*insn_data[CODE_FOR_addsi3].operand[2].predicate)
(operands[1], SImode))
operands[1] = force_reg (SImode, operands[1]);
emit_insn (gen_addsi3 (reg, operands[0], operands[1]));
operands[0] = reg;
}
if (!aarch64_plus_operand (operands[2], SImode))
operands[2] = force_reg (SImode, operands[2]);
emit_jump_insn (gen_cbranchsi4 (gen_rtx_GTU (SImode, const0_rtx,
const0_rtx),
operands[0], operands[2], operands[4]));
operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (VOIDmode, operands[3]));
emit_jump_insn (gen_casesi_dispatch (operands[2], operands[0],
operands[3]));
DONE;
}
)
;; Table-jump body for casesi; the output sequence is produced by
;; aarch64_output_casesi and is 16 bytes long (see the "length" attr).
(define_insn "casesi_dispatch"
[(parallel
[(set (pc)
(mem:DI (unspec [(match_operand:DI 0 "register_operand" "r")
(match_operand:SI 1 "register_operand" "r")]
UNSPEC_CASESI)))
(clobber (reg:CC CC_REGNUM))
(clobber (match_scratch:DI 3 "=r"))
(clobber (match_scratch:DI 4 "=r"))
(use (label_ref (match_operand 2 "" "")))])]
""
"*
return aarch64_output_casesi (operands);
"
[(set_attr "length" "16")
(set_attr "v8type" "branch")]
)
;; No-operation insn.
(define_insn "nop"
[(unspec[(const_int 0)] UNSPEC_NOP)]
""
"nop"
[(set_attr "v8type" "misc")]
)
;; Function prologue/epilogue expanders; the real work is done in C.
(define_expand "prologue"
[(clobber (const_int 0))]
""
"
aarch64_expand_prologue ();
DONE;
"
)
(define_expand "epilogue"
[(clobber (const_int 0))]
""
"
aarch64_expand_epilogue (false);
DONE;
"
)
(define_expand "sibcall_epilogue"
[(clobber (const_int 0))]
""
"
aarch64_expand_epilogue (true);
DONE;
"
)
;; Simple function return.
(define_insn "*do_return"
[(return)]
""
"ret"
[(set_attr "v8type" "branch")]
)
;; Placeholder for __builtin_eh_return; emitted as "#" and split below
;; after reload into a move to the EH return-address slot.
(define_insn "eh_return"
[(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
UNSPECV_EH_RETURN)]
""
"#"
[(set_attr "v8type" "branch")]
)
(define_split
[(unspec_volatile [(match_operand:DI 0 "register_operand" "")]
UNSPECV_EH_RETURN)]
"reload_completed"
[(set (match_dup 1) (match_dup 0))]
{
operands[1] = aarch64_final_eh_return_addr ();
}
)
;; Compare-register-with-zero and branch (cbz/cbnz family).
(define_insn "*cb1"
[(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))]
""
"\\t%0, %l1"
[(set_attr "v8type" "branch")]
)
;; Test-single-bit and branch (tbz/tbnz family).  When the target label
;; is out of the short branch range (see the "length" computation), fall
;; back to extracting the bit with ubfx and using a longer branch.
(define_insn "*tb1"
[(set (pc) (if_then_else
(EQL (zero_extract:DI (match_operand:GPI 0 "register_operand" "r")
(const_int 1)
(match_operand 1 "const_int_operand" "n"))
(const_int 0))
(label_ref (match_operand 2 "" ""))
(pc)))
(clobber (match_scratch:DI 3 "=r"))]
""
"*
if (get_attr_length (insn) == 8)
return \"ubfx\\t%3, %0, %1, #1\;\\t%3, %l2\";
return \"\\t%0, %1, %l2\";
"
[(set_attr "v8type" "branch")
(set_attr "mode" "")
(set (attr "length")
(if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
(lt (minus (match_dup 2) (pc)) (const_int 32764)))
(const_int 4)
(const_int 8)))]
)
;; Branch on the sign bit (LT/GE against zero), implemented as a
;; test-bit-and-branch on the top bit; same short/long range handling
;; as the pattern above.
(define_insn "*cb1"
[(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r")
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))
(clobber (match_scratch:DI 2 "=r"))]
""
"*
if (get_attr_length (insn) == 8)
return \"ubfx\\t%2, %0, , #1\;\\t%2, %l1\";
return \"\\t%0, , %l1\";
"
[(set_attr "v8type" "branch")
(set_attr "mode" "")
(set (attr "length")
(if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
(lt (minus (match_dup 1) (pc)) (const_int 32764)))
(const_int 4)
(const_int 8)))]
)
;; -------------------------------------------------------------------
;; Subroutine calls and sibcalls
;; -------------------------------------------------------------------
;; Call expander: force the callee address into a register when it is
;; not a symbol, or when it is a symbol that needs a long call.
(define_expand "call"
[(parallel [(call (match_operand 0 "memory_operand" "")
(match_operand 1 "general_operand" ""))
(use (match_operand 2 "" ""))
(clobber (reg:DI LR_REGNUM))])]
""
"
{
rtx callee;
/* In an untyped call, we can get NULL for operand 2. */
if (operands[2] == NULL)
operands[2] = const0_rtx;
/* Decide if we should generate indirect calls by loading the
64-bit address of the callee into a register before performing
the branch-and-link. */
callee = XEXP (operands[0], 0);
if (GET_CODE (callee) == SYMBOL_REF
? aarch64_is_long_call_p (callee)
: !REG_P (callee))
XEXP (operands[0], 0) = force_reg (Pmode, callee);
}"
)
;; Indirect call through a register.
(define_insn "*call_reg"
[(call (mem:DI (match_operand:DI 0 "register_operand" "r"))
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
(clobber (reg:DI LR_REGNUM))]
""
"blr\\t%0"
[(set_attr "v8type" "call")]
)
;; Direct call to a symbol that is within "bl" range.
(define_insn "*call_symbol"
[(call (mem:DI (match_operand:DI 0 "" ""))
(match_operand 1 "" ""))
(use (match_operand 2 "" ""))
(clobber (reg:DI LR_REGNUM))]
"GET_CODE (operands[0]) == SYMBOL_REF
&& !aarch64_is_long_call_p (operands[0])"
"bl\\t%a0"
[(set_attr "v8type" "call")]
)
;; As "call" above, but for calls that return a value.
(define_expand "call_value"
[(parallel [(set (match_operand 0 "" "")
(call (match_operand 1 "memory_operand" "")
(match_operand 2 "general_operand" "")))
(use (match_operand 3 "" ""))
(clobber (reg:DI LR_REGNUM))])]
""
"
{
rtx callee;
/* In an untyped call, we can get NULL for operand 3. */
if (operands[3] == NULL)
operands[3] = const0_rtx;
/* Decide if we should generate indirect calls by loading the
64-bit address of the callee into a register before performing
the branch-and-link. */
callee = XEXP (operands[1], 0);
if (GET_CODE (callee) == SYMBOL_REF
? aarch64_is_long_call_p (callee)
: !REG_P (callee))
XEXP (operands[1], 0) = force_reg (Pmode, callee);
}"
)
;; Value-returning indirect call through a register.
(define_insn "*call_value_reg"
[(set (match_operand 0 "" "")
(call (mem:DI (match_operand:DI 1 "register_operand" "r"))
(match_operand 2 "" "")))
(use (match_operand 3 "" ""))
(clobber (reg:DI LR_REGNUM))]
""
"blr\\t%1"
[(set_attr "v8type" "call")]
)
;; Value-returning direct call to a nearby symbol.
(define_insn "*call_value_symbol"
[(set (match_operand 0 "" "")
(call (mem:DI (match_operand:DI 1 "" ""))
(match_operand 2 "" "")))
(use (match_operand 3 "" ""))
(clobber (reg:DI LR_REGNUM))]
"GET_CODE (operands[1]) == SYMBOL_REF
&& !aarch64_is_long_call_p (operands[1])"
"bl\\t%a1"
[(set_attr "v8type" "call")]
)
;; Sibling (tail) call expanders; note (return) in the parallel and no
;; LR clobber, since the callee returns directly to our caller.
(define_expand "sibcall"
[(parallel [(call (match_operand 0 "memory_operand" "")
(match_operand 1 "general_operand" ""))
(return)
(use (match_operand 2 "" ""))])]
""
{
if (operands[2] == NULL_RTX)
operands[2] = const0_rtx;
}
)
(define_expand "sibcall_value"
[(parallel [(set (match_operand 0 "" "")
(call (match_operand 1 "memory_operand" "")
(match_operand 2 "general_operand" "")))
(return)
(use (match_operand 3 "" ""))])]
""
{
if (operands[3] == NULL_RTX)
operands[3] = const0_rtx;
}
)
;; Sibling calls are emitted as a plain branch to the symbol.
(define_insn "*sibcall_insn"
[(call (mem:DI (match_operand:DI 0 "" "X"))
(match_operand 1 "" ""))
(return)
(use (match_operand 2 "" ""))]
"GET_CODE (operands[0]) == SYMBOL_REF"
"b\\t%a0"
[(set_attr "v8type" "branch")]
)
(define_insn "*sibcall_value_insn"
[(set (match_operand 0 "" "")
(call (mem:DI (match_operand 1 "" "X"))
(match_operand 2 "" "")))
(return)
(use (match_operand 3 "" ""))]
"GET_CODE (operands[1]) == SYMBOL_REF"
"b\\t%a1"
[(set_attr "v8type" "branch")]
)
;; Call subroutine returning any type.
(define_expand "untyped_call"
[(parallel [(call (match_operand 0 "")
(const_int 0))
(match_operand 1 "")
(match_operand 2 "")])]
""
{
int i;
emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
for (i = 0; i < XVECLEN (operands[2], 0); i++)
{
rtx set = XVECEXP (operands[2], 0, i);
emit_move_insn (SET_DEST (set), SET_SRC (set));
}
/* The optimizer does not know that the call sets the function value
registers we stored in the result block. We avoid problems by
claiming that all hard registers are used and clobbered at this
point. */
emit_insn (gen_blockage ());
DONE;
})
;; -------------------------------------------------------------------
;; Moves
;; -------------------------------------------------------------------
;; Move expander for the sub-word (SHORT: QI/HI) modes; stores of
;; anything but zero force the source into a register first.
(define_expand "mov"
[(set (match_operand:SHORT 0 "nonimmediate_operand" "")
(match_operand:SHORT 1 "general_operand" ""))]
""
"
if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
operands[1] = force_reg (mode, operands[1]);
"
)
(define_insn "*mov_aarch64"
[(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r, *w,r,*w, m, m, r,*w,*w")
(match_operand:SHORT 1 "general_operand" " r,M,D,m, m,rZ,*w,*w, r,*w"))]
"(register_operand (operands[0], mode)
|| aarch64_reg_or_zero (operands[1], mode))"
"@
mov\\t%w0, %w1
mov\\t%w0, %1
movi\\t%0., %1
ldr\\t%w0, %1
ldr\\t%0, %1
str\\t%w1, %0
str\\t%1, %0
umov\\t%w0, %1.[0]
dup\\t%0., %w1
dup\\t%0, %1.[0]"
[(set_attr "v8type" "move,alu,alu,load1,load1,store1,store1,*,*,*")
(set_attr "simd_type" "*,*,simd_move_imm,*,*,*,*,simd_movgp,simd_dupgp,simd_dup")
(set_attr "simd" "*,*,yes,*,*,*,*,yes,yes,yes")
(set_attr "mode" "")
(set_attr "simd_mode" "")]
)
;; Move expander for the full-word (GPI: SI/DI) modes; constants are
;; expanded by aarch64_expand_mov_immediate (may emit several insns).
(define_expand "mov"
[(set (match_operand:GPI 0 "nonimmediate_operand" "")
(match_operand:GPI 1 "general_operand" ""))]
""
"
if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
operands[1] = force_reg (mode, operands[1]);
if (CONSTANT_P (operands[1]))
{
aarch64_expand_mov_immediate (operands[0], operands[1]);
DONE;
}
"
)
(define_insn "*movsi_aarch64"
[(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,*w,m, m,*w, r,*w")
(match_operand:SI 1 "aarch64_mov_operand" " r,M,m, m,rZ,*w,rZ,*w,*w"))]
"(register_operand (operands[0], SImode)
|| aarch64_reg_or_zero (operands[1], SImode))"
"@
mov\\t%w0, %w1
mov\\t%w0, %1
ldr\\t%w0, %1
ldr\\t%s0, %1
str\\t%w1, %0
str\\t%s1, %0
fmov\\t%s0, %w1
fmov\\t%w0, %s1
fmov\\t%s0, %s1"
[(set_attr "v8type" "move,alu,load1,load1,store1,store1,fmov,fmov,fmov")
(set_attr "mode" "SI")
(set_attr "fp" "*,*,*,yes,*,yes,yes,yes,yes")]
)
(define_insn "*movdi_aarch64"
[(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,*w,m, m,r,r, *w, r,*w,w")
(match_operand:DI 1 "aarch64_mov_operand" " r,r,k,N,m, m,rZ,*w,S,Ush,rZ,*w,*w,Dd"))]
"(register_operand (operands[0], DImode)
|| aarch64_reg_or_zero (operands[1], DImode))"
"@
mov\\t%x0, %x1
mov\\t%0, %x1
mov\\t%x0, %1
mov\\t%x0, %1
ldr\\t%x0, %1
ldr\\t%d0, %1
str\\t%x1, %0
str\\t%d1, %0
adr\\t%x0, %a1
adrp\\t%x0, %A1
fmov\\t%d0, %x1
fmov\\t%x0, %d1
fmov\\t%d0, %d1
movi\\t%d0, %1"
[(set_attr "v8type" "move,move,move,alu,load1,load1,store1,store1,adr,adr,fmov,fmov,fmov,fmov")
(set_attr "mode" "DI")
(set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes,*")
(set_attr "simd" "*,*,*,*,*,*,*,*,*,*,*,*,*,yes")]
)
;; Insert a 16-bit immediate into a 16-bit-aligned field (movk); the
;; condition restricts the bit position to a multiple of 16 within the
;; mode.
(define_insn "insv_imm"
[(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r")
(const_int 16)
(match_operand:GPI 1 "const_int_operand" "n"))
(match_operand:GPI 2 "const_int_operand" "n"))]
"UINTVAL (operands[1]) < GET_MODE_BITSIZE (mode)
&& UINTVAL (operands[1]) % 16 == 0"
"movk\\t%0, %X2, lsl %1"
[(set_attr "v8type" "movk")
(set_attr "mode" "")]
)
;; 128-bit integer (TI) moves.
(define_expand "movti"
[(set (match_operand:TI 0 "nonimmediate_operand" "")
(match_operand:TI 1 "general_operand" ""))]
""
"
if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
operands[1] = force_reg (TImode, operands[1]);
"
)
;; The "#" alternatives are split after reload by the TI split below.
(define_insn "*movti_aarch64"
[(set (match_operand:TI 0
"nonimmediate_operand" "=r, *w,r ,*w,r ,Ump,Ump,*w,m")
(match_operand:TI 1
"aarch64_movti_operand" " rn,r ,*w,*w,Ump,r ,Z , m,*w"))]
"(register_operand (operands[0], TImode)
|| aarch64_reg_or_zero (operands[1], TImode))"
"@
#
#
#
orr\\t%0.16b, %1.16b, %1.16b
ldp\\t%0, %H0, %1
stp\\t%1, %H1, %0
stp\\txzr, xzr, %0
ldr\\t%q0, %1
str\\t%q1, %0"
[(set_attr "v8type" "move2,fmovi2f,fmovf2i,*, \
load2,store2,store2,fpsimd_load,fpsimd_store")
(set_attr "simd_type" "*,*,*,simd_move,*,*,*,*,*")
(set_attr "mode" "DI,DI,DI,TI,DI,DI,DI,TI,TI")
(set_attr "length" "8,8,8,4,4,4,4,4,4")
(set_attr "fp" "*,*,*,*,*,*,*,yes,yes")
(set_attr "simd" "*,*,*,yes,*,*,*,*,*")])
;; Split a TImode register-register or register-immediate move into
;; its component DImode pieces, taking care to handle overlapping
;; source and dest registers.
(define_split
[(set (match_operand:TI 0 "register_operand" "")
(match_operand:TI 1 "aarch64_reg_or_imm" ""))]
"reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])"
[(const_int 0)]
{
aarch64_split_128bit_move (operands[0], operands[1]);
DONE;
})
;; Scalar floating-point (GPF: SF/DF) move expander; rejects FP code
;; entirely under -mgeneral-regs-only.
(define_expand "mov"
[(set (match_operand:GPF 0 "nonimmediate_operand" "")
(match_operand:GPF 1 "general_operand" ""))]
""
"
if (!TARGET_FLOAT)
{
sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
FAIL;
}
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (mode, operands[1]);
"
)
(define_insn "*movsf_aarch64"
[(set (match_operand:SF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
(match_operand:SF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
"TARGET_FLOAT && (register_operand (operands[0], SFmode)
|| register_operand (operands[1], SFmode))"
"@
fmov\\t%s0, %w1
fmov\\t%w0, %s1
fmov\\t%s0, %s1
fmov\\t%s0, %1
ldr\\t%s0, %1
str\\t%s1, %0
ldr\\t%w0, %1
str\\t%w1, %0
mov\\t%w0, %w1"
[(set_attr "v8type" "fmovi2f,fmovf2i,\
fmov,fconst,fpsimd_load,\
fpsimd_store,fpsimd_load,fpsimd_store,fmov")
(set_attr "mode" "SF")]
)
(define_insn "*movdf_aarch64"
[(set (match_operand:DF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
(match_operand:DF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
"TARGET_FLOAT && (register_operand (operands[0], DFmode)
|| register_operand (operands[1], DFmode))"
"@
fmov\\t%d0, %x1
fmov\\t%x0, %d1
fmov\\t%d0, %d1
fmov\\t%d0, %1
ldr\\t%d0, %1
str\\t%d1, %0
ldr\\t%x0, %1
str\\t%x1, %0
mov\\t%x0, %x1"
[(set_attr "v8type" "fmovi2f,fmovf2i,\
fmov,fconst,fpsimd_load,\
fpsimd_store,fpsimd_load,fpsimd_store,move")
(set_attr "mode" "DF")]
)
;; 128-bit floating-point (TF) moves; same -mgeneral-regs-only check.
(define_expand "movtf"
[(set (match_operand:TF 0 "nonimmediate_operand" "")
(match_operand:TF 1 "general_operand" ""))]
""
"
if (!TARGET_FLOAT)
{
sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
FAIL;
}
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (TFmode, operands[1]);
"
)
;; The "#" alternatives are split after reload by the TF split below.
(define_insn "*movtf_aarch64"
[(set (match_operand:TF 0
"nonimmediate_operand" "=w,?&r,w ,?r,w,?w,w,m,?r ,Ump")
(match_operand:TF 1
"general_operand" " w,?r, ?r,w ,Y,Y ,m,w,Ump,?rY"))]
"TARGET_FLOAT && (register_operand (operands[0], TFmode)
|| register_operand (operands[1], TFmode))"
"@
orr\\t%0.16b, %1.16b, %1.16b
#
#
#
movi\\t%0.2d, #0
fmov\\t%s0, wzr
ldr\\t%q0, %1
str\\t%q1, %0
ldp\\t%0, %H0, %1
stp\\t%1, %H1, %0"
[(set_attr "v8type" "logic,move2,fmovi2f,fmovf2i,fconst,fconst,fpsimd_load,fpsimd_store,fpsimd_load2,fpsimd_store2")
(set_attr "mode" "DF,DF,DF,DF,DF,DF,TF,TF,DF,DF")
(set_attr "length" "4,8,8,8,4,4,4,4,4,4")
(set_attr "fp" "*,*,yes,yes,*,yes,yes,yes,*,*")
(set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*")]
)
;; Split a TFmode register-register or register-immediate move into
;; DImode pieces, as for TImode above.
(define_split
[(set (match_operand:TF 0 "register_operand" "")
(match_operand:TF 1 "aarch64_reg_or_imm" ""))]
"reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])"
[(const_int 0)]
{
aarch64_split_128bit_move (operands[0], operands[1]);
DONE;
}
)
;; Operands 1 and 3 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
;; Load a pair of adjacent general registers with one ldp.
(define_insn "load_pair"
[(set (match_operand:GPI 0 "register_operand" "=r")
(match_operand:GPI 1 "aarch64_mem_pair_operand" "Ump"))
(set (match_operand:GPI 2 "register_operand" "=r")
(match_operand:GPI 3 "memory_operand" "m"))]
"rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
GET_MODE_SIZE (mode)))"
"ldp\\t%0, %2, %1"
[(set_attr "v8type" "load2")
(set_attr "mode" "")]
)
;; Operands 0 and 2 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
;; Store a pair of general registers with one stp.
(define_insn "store_pair"
[(set (match_operand:GPI 0 "aarch64_mem_pair_operand" "=Ump")
(match_operand:GPI 1 "register_operand" "r"))
(set (match_operand:GPI 2 "memory_operand" "=m")
(match_operand:GPI 3 "register_operand" "r"))]
"rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
GET_MODE_SIZE (mode)))"
"stp\\t%1, %3, %0"
[(set_attr "v8type" "store2")
(set_attr "mode" "")]
)
;; Operands 1 and 3 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
;; Floating-point (GPF) variant of load_pair.
(define_insn "load_pair"
[(set (match_operand:GPF 0 "register_operand" "=w")
(match_operand:GPF 1 "aarch64_mem_pair_operand" "Ump"))
(set (match_operand:GPF 2 "register_operand" "=w")
(match_operand:GPF 3 "memory_operand" "m"))]
"rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
GET_MODE_SIZE (mode)))"
"ldp\\t%0, %2, %1"
[(set_attr "v8type" "fpsimd_load2")
(set_attr "mode" "")]
)
;; Operands 0 and 2 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
;; Floating-point (GPF) variant of store_pair.
;; Fix: this is a store, so classify it as "fpsimd_store2"; it was
;; wrongly tagged "fpsimd_load2" (copy-paste from load_pair above),
;; which gave the scheduler the wrong instruction class.
(define_insn "store_pair"
[(set (match_operand:GPF 0 "aarch64_mem_pair_operand" "=Ump")
(match_operand:GPF 1 "register_operand" "w"))
(set (match_operand:GPF 2 "memory_operand" "=m")
(match_operand:GPF 3 "register_operand" "w"))]
"rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
GET_MODE_SIZE (mode)))"
"stp\\t%1, %3, %0"
[(set_attr "v8type" "fpsimd_store2")
(set_attr "mode" "")]
)
;; Load pair with writeback. This is primarily used in function epilogues
;; when restoring [fp,lr]
;; Operand 0 is the updated base, operand 1 the incoming base (tied),
;; operands 2/3 the destination registers, operand 4 the adjustment and
;; operand 5 must be operand 4 plus one register width.
(define_insn "loadwb_pair_"
[(parallel
[(set (match_operand:PTR 0 "register_operand" "=k")
(plus:PTR (match_operand:PTR 1 "register_operand" "0")
(match_operand:PTR 4 "const_int_operand" "n")))
(set (match_operand:GPI 2 "register_operand" "=r")
(mem:GPI (plus:PTR (match_dup 1)
(match_dup 4))))
(set (match_operand:GPI 3 "register_operand" "=r")
(mem:GPI (plus:PTR (match_dup 1)
(match_operand:PTR 5 "const_int_operand" "n"))))])]
"INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (mode)"
"ldp\\t%2, %3, [%1], %4"
[(set_attr "v8type" "load2")
(set_attr "mode" "")]
)
;; Store pair with writeback. This is primarily used in function prologues
;; when saving [fp,lr]
;; Mirror of loadwb_pair_: writes the pair through the updated base
;; using pre-index addressing ("[%0, %4]!").
(define_insn "storewb_pair_"
[(parallel
[(set (match_operand:PTR 0 "register_operand" "=&k")
(plus:PTR (match_operand:PTR 1 "register_operand" "0")
(match_operand:PTR 4 "const_int_operand" "n")))
(set (mem:GPI (plus:PTR (match_dup 0)
(match_dup 4)))
(match_operand:GPI 2 "register_operand" "r"))
(set (mem:GPI (plus:PTR (match_dup 0)
(match_operand:PTR 5 "const_int_operand" "n")))
(match_operand:GPI 3 "register_operand" "r"))])]
"INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (mode)"
"stp\\t%2, %3, [%0, %4]!"
[(set_attr "v8type" "store2")
(set_attr "mode" "")]
)
;; -------------------------------------------------------------------
;; Sign/Zero extension
;; -------------------------------------------------------------------
;; SI -> DI extension expander (ANY_EXTEND covers sign and zero).
(define_expand "sidi2"
[(set (match_operand:DI 0 "register_operand")
(ANY_EXTEND:DI (match_operand:SI 1 "nonimmediate_operand")))]
""
)
;; Sign-extend SI to DI: sxtw from a register, ldrsw from memory.
(define_insn "*extendsidi2_aarch64"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
""
"@
sxtw\t%0, %w1
ldrsw\t%0, %1"
[(set_attr "v8type" "extend,load1")
(set_attr "mode" "DI")]
)
;; Zero-extend SI to DI: uxtw from a register; a 32-bit ldr zero-extends
;; implicitly when loading from memory.
(define_insn "*zero_extendsidi2_aarch64"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
""
"@
uxtw\t%0, %w1
ldr\t%w0, %1"
[(set_attr "v8type" "extend,load1")
(set_attr "mode" "DI")]
)
;; SHORT (QI/HI) -> GPI (SI/DI) extension expander.
(define_expand "2"
[(set (match_operand:GPI 0 "register_operand")
(ANY_EXTEND:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
""
)
(define_insn "*extend2_aarch64"
[(set (match_operand:GPI 0 "register_operand" "=r,r")
(sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
""
"@
sxt\t%0, %w1
ldrs\t%0, %1"
[(set_attr "v8type" "extend,load1")
(set_attr "mode" "")]
)
;; Zero-extension additionally allows an FP/SIMD register destination.
(define_insn "*zero_extend2_aarch64"
[(set (match_operand:GPI 0 "register_operand" "=r,r,*w")
(zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m")))]
""
"@
uxt\t%0, %w1
ldr\t%w0, %1
ldr\t%0, %1"
[(set_attr "v8type" "extend,load1,load1")
(set_attr "mode" "")]
)
;; QI -> HI extensions.
(define_expand "qihi2"
[(set (match_operand:HI 0 "register_operand")
(ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand")))]
""
)
(define_insn "*qihi2_aarch64"
[(set (match_operand:HI 0 "register_operand" "=r,r")
(ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
""
"@
xtb\t%w0, %w1
b\t%w0, %1"
[(set_attr "v8type" "extend,load1")
(set_attr "mode" "HI")]
)
;; -------------------------------------------------------------------
;; Simple arithmetic
;; -------------------------------------------------------------------
;; Expand reg + (reg | long immediate).  Immediates that do not fit a
;; single add/sub (checked via aarch64_plus_operand) are split: the
;; part above the low 12 bits is added first into a subtarget, leaving
;; a 12-bit remainder for the matching insn.  Negative immediates are
;; rounded toward zero so the remainder keeps the original sign.
(define_expand "add<mode>3"
  [(set
    (match_operand:GPI 0 "register_operand" "")
    (plus:GPI (match_operand:GPI 1 "register_operand" "")
	      (match_operand:GPI 2 "aarch64_pluslong_operand" "")))]
  ""
  "
  if (! aarch64_plus_operand (operands[2], VOIDmode))
    {
      rtx subtarget = ((optimize && can_create_pseudo_p ())
		       ? gen_reg_rtx (<MODE>mode) : operands[0]);
      HOST_WIDE_INT imm = INTVAL (operands[2]);

      if (imm < 0)
	imm = -(-imm & ~0xfff);
      else
	imm &= ~0xfff;

      emit_insn (gen_add<mode>3 (subtarget, operands[1], GEN_INT (imm)));
      operands[1] = subtarget;
      operands[2] = GEN_INT (INTVAL (operands[2]) - imm);
    }
  "
)
;; SImode add: positive immediate (I), register, or negative immediate
;; (J) implemented as a subtract of the negated value (#%n2).
;; "%" on operand 1 marks the plus as commutative; "k" allows SP.
(define_insn "*addsi3_aarch64"
  [(set
    (match_operand:SI 0 "register_operand" "=rk,rk,rk")
    (plus:SI
     (match_operand:SI 1 "register_operand" "%rk,rk,rk")
     (match_operand:SI 2 "aarch64_plus_operand" "I,r,J")))]
  ""
  "@
  add\\t%w0, %w1, %2
  add\\t%w0, %w1, %w2
  sub\\t%w0, %w1, #%n2"
  [(set_attr "v8type" "alu")
   (set_attr "mode" "SI")]
)
;; zero_extend version of above
;; zero_extend version of *addsi3_aarch64: a 32-bit add/sub writing the
;; W register already zeroes the upper 32 bits, so the zero_extend:DI
;; wrapper costs no extra instruction.
(define_insn "*addsi3_aarch64_uxtw"
  [(set
    (match_operand:DI 0 "register_operand" "=rk,rk,rk")
    (zero_extend:DI
     (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk")
	      (match_operand:SI 2 "aarch64_plus_operand" "I,r,J"))))]
  ""
  "@
  add\\t%w0, %w1, %2
  add\\t%w0, %w1, %w2
  sub\\t%w0, %w1, #%n2"
  [(set_attr "v8type" "alu")
   (set_attr "mode" "SI")]
)
;; DImode add.  Besides the immediate/register/negated-immediate
;; alternatives, a fourth ("!w") performs the add in a SIMD D register
;; (add %d0, %d1, %d2), flagged via the "simd" attribute.
(define_insn "*adddi3_aarch64"
  [(set
    (match_operand:DI 0 "register_operand" "=rk,rk,rk,!w")
    (plus:DI
     (match_operand:DI 1 "register_operand" "%rk,rk,rk,!w")
     (match_operand:DI 2 "aarch64_plus_operand" "I,r,J,!w")))]
  ""
  "@
  add\\t%x0, %x1, %2
  add\\t%x0, %x1, %x2
  sub\\t%x0, %x1, #%n2
  add\\t%d0, %d1, %d2"
  [(set_attr "v8type" "alu")
   (set_attr "mode" "DI")
   (set_attr "simd" "*,*,*,yes")]
)
;; Add and set the NZ condition flags (adds/subs), also producing the
;; sum.  Negative immediates (J) use subs with the negated value.
(define_insn "*add<mode>3_compare0"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (plus:GPI (match_operand:GPI 1 "register_operand" "%r,r")
		   (match_operand:GPI 2 "aarch64_plus_operand" "rI,J"))
	 (const_int 0)))
   (set (match_operand:GPI 0 "register_operand" "=r,r")
	(plus:GPI (match_dup 1) (match_dup 2)))]
  ""
  "@
  adds\\t%<w>0, %<w>1, %<w>2
  subs\\t%<w>0, %<w>1, #%n2"
  [(set_attr "v8type" "alus")
   (set_attr "mode" "<MODE>")]
)
;; zero_extend version of above
;; zero_extend version of *add<mode>3_compare0 (SI only): the 32-bit
;; adds/subs zeroes the upper half of the X destination for free.
(define_insn "*addsi3_compare0_uxtw"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (plus:SI (match_operand:SI 1 "register_operand" "%r,r")
		  (match_operand:SI 2 "aarch64_plus_operand" "rI,J"))
	 (const_int 0)))
   (set (match_operand:DI 0 "register_operand" "=r,r")
	(zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
  ""
  "@
  adds\\t%w0, %w1, %w2
  subs\\t%w0, %w1, #%n2"
  [(set_attr "v8type" "alus")
   (set_attr "mode" "SI")]
)
;; Flag-setting add of (reg * power-of-2) + reg, emitted as adds with a
;; shifted operand: the power-of-2 multiplier becomes "lsl %p2".
(define_insn "*adds_mul_imm_<mode>"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (plus:GPI (mult:GPI
		    (match_operand:GPI 1 "register_operand" "r")
		    (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))
		   (match_operand:GPI 3 "register_operand" "rk"))
	 (const_int 0)))
   (set (match_operand:GPI 0 "register_operand" "=r")
	(plus:GPI (mult:GPI (match_dup 1) (match_dup 2))
		  (match_dup 3)))]
  ""
  "adds\\t%<w>0, %<w>3, %<w>1, lsl %p2"
  [(set_attr "v8type" "alus_shift")
   (set_attr "mode" "<MODE>")]
)
;; Flag-setting subtract of (reg * power-of-2) from reg, emitted as
;; subs with a left-shifted second operand.
(define_insn "*subs_mul_imm_<mode>"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (minus:GPI (match_operand:GPI 1 "register_operand" "rk")
		    (mult:GPI
		     (match_operand:GPI 2 "register_operand" "r")
		     (match_operand:QI 3 "aarch64_pwr_2_<mode>" "n")))
	 (const_int 0)))
   (set (match_operand:GPI 0 "register_operand" "=r")
	(minus:GPI (match_dup 1)
		   (mult:GPI (match_dup 2) (match_dup 3))))]
  ""
  "subs\\t%<w>0, %<w>1, %<w>2, lsl %p3"
  [(set_attr "v8type" "alus_shift")
   (set_attr "mode" "<MODE>")]
)
;; Flag-setting add of an extended narrow register to a full register,
;; using the extended-register form of adds ({s,u}xt{b,h,w}).
(define_insn "*adds_<optab><ALLX:mode>_<GPI:mode>"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (plus:GPI
	  (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r"))
	  (match_operand:GPI 2 "register_operand" "r"))
	 (const_int 0)))
   (set (match_operand:GPI 0 "register_operand" "=r")
	(plus:GPI (ANY_EXTEND:GPI (match_dup 1)) (match_dup 2)))]
  ""
  "adds\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
  [(set_attr "v8type" "alus_ext")
   (set_attr "mode" "<GPI:MODE>")]
)
;; Flag-setting subtract of an extended narrow register from a full
;; register, using the extended-register form of subs.
(define_insn "*subs_<optab><ALLX:mode>_<GPI:mode>"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (minus:GPI (match_operand:GPI 1 "register_operand" "r")
		    (ANY_EXTEND:GPI
		     (match_operand:ALLX 2 "register_operand" "r")))
	 (const_int 0)))
   (set (match_operand:GPI 0 "register_operand" "=r")
	(minus:GPI (match_dup 1) (ANY_EXTEND:GPI (match_dup 2))))]
  ""
  "subs\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
  [(set_attr "v8type" "alus_ext")
   (set_attr "mode" "<GPI:MODE>")]
)
;; Flag-setting add where combine has represented extend-and-shift as
;; an extract of (reg * power-of-2); matched only when the extract is
;; equivalent to a {s,u}xt + lsl (aarch64_is_extend_from_extract).
(define_insn "*adds_<optab><mode>_multp2"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (plus:GPI (ANY_EXTRACT:GPI
		    (mult:GPI (match_operand:GPI 1 "register_operand" "r")
			      (match_operand 2 "aarch64_pwr_imm3" "Up3"))
		    (match_operand 3 "const_int_operand" "n")
		    (const_int 0))
		   (match_operand:GPI 4 "register_operand" "r"))
	 (const_int 0)))
   (set (match_operand:GPI 0 "register_operand" "=r")
	(plus:GPI (ANY_EXTRACT:GPI (mult:GPI (match_dup 1) (match_dup 2))
				   (match_dup 3)
				   (const_int 0))
		  (match_dup 4)))]
  "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
  "adds\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
  [(set_attr "v8type" "alus_ext")
   (set_attr "mode" "<MODE>")]
)
;; Flag-setting subtract counterpart of *adds_<optab><mode>_multp2:
;; subtract an extract-represented extended/shifted value.
(define_insn "*subs_<optab><mode>_multp2"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (minus:GPI (match_operand:GPI 4 "register_operand" "r")
		    (ANY_EXTRACT:GPI
		     (mult:GPI (match_operand:GPI 1 "register_operand" "r")
			       (match_operand 2 "aarch64_pwr_imm3" "Up3"))
		     (match_operand 3 "const_int_operand" "n")
		     (const_int 0)))
	 (const_int 0)))
   (set (match_operand:GPI 0 "register_operand" "=r")
	(minus:GPI (match_dup 4) (ANY_EXTRACT:GPI
				  (mult:GPI (match_dup 1) (match_dup 2))
				  (match_dup 3)
				  (const_int 0))))]
  "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
  "subs\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
  [(set_attr "v8type" "alus_ext")
   (set_attr "mode" "<MODE>")]
)
;; Compare-only form of add (no result register): cmn for addable
;; immediates/registers, cmp with the negated immediate otherwise.
(define_insn "*add<mode>3nr_compare0"
  [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r")
		   (match_operand:GPI 1 "aarch64_plus_operand" "rI,J"))
	 (const_int 0)))]
  ""
  "@
  cmn\\t%<w>0, %<w>1
  cmp\\t%<w>0, #%n1"
  [(set_attr "v8type" "alus")
   (set_attr "mode" "<MODE>")]
)
;; Compare a register against the negation of another: reg - (-reg2)
;; is reg + reg2, so emit cmn.
(define_insn "*compare_neg<mode>"
  [(set (reg:CC CC_REGNUM)
	(compare:CC
	 (match_operand:GPI 0 "register_operand" "r")
	 (neg:GPI (match_operand:GPI 1 "register_operand" "r"))))]
  ""
  "cmn\\t%<w>0, %<w>1"
  [(set_attr "v8type" "alus")
   (set_attr "mode" "<MODE>")]
)
;; Add a shifted register: (reg1 SHIFT imm) + reg3, via the shifted-
;; register form of add.  ASHIFT iterates over the shift codes.
(define_insn "*add_<shift>_<mode>"
  [(set (match_operand:GPI 0 "register_operand" "=rk")
	(plus:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
			      (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
		  (match_operand:GPI 3 "register_operand" "r")))]
  ""
  "add\\t%<w>0, %<w>3, %<w>1, <shift> %2"
  [(set_attr "v8type" "alu_shift")
   (set_attr "mode" "<MODE>")]
)
;; zero_extend version of above
;; zero_extend version of *add_<shift>_<mode> (SI only): the 32-bit add
;; zeroes the upper half of the X destination.
(define_insn "*add_<shift>_si_uxtw"
  [(set (match_operand:DI 0 "register_operand" "=rk")
	(zero_extend:DI
	 (plus:SI (ASHIFT:SI (match_operand:SI 1 "register_operand" "r")
			     (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
		  (match_operand:SI 3 "register_operand" "r"))))]
  ""
  "add\\t%w0, %w3, %w1, <shift> %2"
  [(set_attr "v8type" "alu_shift")
   (set_attr "mode" "SI")]
)
;; Add (reg * power-of-2) + reg, emitted as add with "lsl %p2"
;; (%p prints the log2 of the constant).
(define_insn "*add_mul_imm_<mode>"
  [(set (match_operand:GPI 0 "register_operand" "=rk")
	(plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
			    (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))
		  (match_operand:GPI 3 "register_operand" "r")))]
  ""
  "add\\t%<w>0, %<w>3, %<w>1, lsl %p2"
  [(set_attr "v8type" "alu_shift")
   (set_attr "mode" "<MODE>")]
)
;; Add an extended narrow register to a full register via the
;; extended-register form of add ({s,u}xt{b,h,w}).
(define_insn "*add_<optab><ALLX:mode>_<GPI:mode>"
  [(set (match_operand:GPI 0 "register_operand" "=rk")
	(plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r"))
		  (match_operand:GPI 2 "register_operand" "r")))]
  ""
  "add\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "<GPI:MODE>")]
)
;; zero_extend version of above
;; zero_extend version of *add_<optab><ALLX:mode>_<GPI:mode> (SI result).
(define_insn "*add_<optab><SHORT:mode>_si_uxtw"
  [(set (match_operand:DI 0 "register_operand" "=rk")
	(zero_extend:DI
	 (plus:SI (ANY_EXTEND:SI (match_operand:SHORT 1 "register_operand" "r"))
		  (match_operand:GPI 2 "register_operand" "r"))))]
  ""
  "add\\t%w0, %w2, %w1, <su>xt<SHORT:size>"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "SI")]
)
;; Add an extended-then-shifted narrow register: one add with an
;; extended-register operand plus shift amount ({s,u}xtX #imm).
(define_insn "*add_<optab><ALLX:mode>_shft_<GPI:mode>"
  [(set (match_operand:GPI 0 "register_operand" "=rk")
	(plus:GPI (ashift:GPI (ANY_EXTEND:GPI
			       (match_operand:ALLX 1 "register_operand" "r"))
			      (match_operand 2 "aarch64_imm3" "Ui3"))
		  (match_operand:GPI 3 "register_operand" "r")))]
  ""
  "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %2"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "<GPI:MODE>")]
)
;; zero_extend version of above
;; zero_extend version of *add_<optab><ALLX:mode>_shft_<GPI:mode>.
(define_insn "*add_<optab><SHORT:mode>_shft_si_uxtw"
  [(set (match_operand:DI 0 "register_operand" "=rk")
	(zero_extend:DI
	 (plus:SI (ashift:SI (ANY_EXTEND:SI
			      (match_operand:SHORT 1 "register_operand" "r"))
			     (match_operand 2 "aarch64_imm3" "Ui3"))
		  (match_operand:SI 3 "register_operand" "r"))))]
  ""
  "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %2"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "SI")]
)
;; Same as the shft variant, but with the shift written as a multiply
;; by a power of two ("%p2" prints the log2 of the multiplier).
(define_insn "*add_<optab><ALLX:mode>_mult_<GPI:mode>"
  [(set (match_operand:GPI 0 "register_operand" "=rk")
	(plus:GPI (mult:GPI (ANY_EXTEND:GPI
			     (match_operand:ALLX 1 "register_operand" "r"))
			    (match_operand 2 "aarch64_pwr_imm3" "Up3"))
		  (match_operand:GPI 3 "register_operand" "r")))]
  ""
  "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %p2"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "<GPI:MODE>")]
)
;; zero_extend version of above
;; zero_extend version of *add_<optab><ALLX:mode>_mult_<GPI:mode>.
(define_insn "*add_<optab><SHORT:mode>_mult_si_uxtw"
  [(set (match_operand:DI 0 "register_operand" "=rk")
	(zero_extend:DI (plus:SI (mult:SI (ANY_EXTEND:SI
					   (match_operand:SHORT 1 "register_operand" "r"))
					  (match_operand 2 "aarch64_pwr_imm3" "Up3"))
				 (match_operand:SI 3 "register_operand" "r"))))]
  ""
  "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %p2"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "SI")]
)
;; Add where combine represented extend-and-shift as an extract of
;; (reg * power-of-2); matched when equivalent to {s,u}xt + lsl.
(define_insn "*add_<optab><mode>_multp2"
  [(set (match_operand:GPI 0 "register_operand" "=rk")
	(plus:GPI (ANY_EXTRACT:GPI
		   (mult:GPI (match_operand:GPI 1 "register_operand" "r")
			     (match_operand 2 "aarch64_pwr_imm3" "Up3"))
		   (match_operand 3 "const_int_operand" "n")
		   (const_int 0))
		  (match_operand:GPI 4 "register_operand" "r")))]
  "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
  "add\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "<MODE>")]
)
;; zero_extend version of above
;; zero_extend version of *add_<optab><mode>_multp2 (SI only).
(define_insn "*add_<optab>si_multp2_uxtw"
  [(set (match_operand:DI 0 "register_operand" "=rk")
	(zero_extend:DI
	 (plus:SI (ANY_EXTRACT:SI
		   (mult:SI (match_operand:SI 1 "register_operand" "r")
			    (match_operand 2 "aarch64_pwr_imm3" "Up3"))
		   (match_operand 3 "const_int_operand" "n")
		   (const_int 0))
		  (match_operand:SI 4 "register_operand" "r"))))]
  "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])"
  "add\\t%w0, %w4, %w1, <su>xt%e3 %p2"
  [(set_attr "v8type" "alu_ext")
   (set_attr "mode" "SI")]
)
;; Add with carry-in: reg1 + reg2 + C, where the carry is modelled as
;; (geu CC 0).  Emitted as adc.
(define_insn "*add<mode>3_carryin"
  [(set
    (match_operand:GPI 0 "register_operand" "=r")
    (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
	      (plus:GPI
	       (match_operand:GPI 1 "register_operand" "r")
	       (match_operand:GPI 2 "register_operand" "r"))))]
  ""
  "adc\\t%<w>0, %<w>1, %<w>2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "<MODE>")]
)
;; zero_extend version of above
;; zero_extend version of the add-with-carry pattern above: the 32-bit
;; adc zeroes the upper half of the X destination.
(define_insn "*addsi3_carryin_uxtw"
  [(set
    (match_operand:DI 0 "register_operand" "=r")
    (zero_extend:DI
     (plus:SI (geu:SI (reg:CC CC_REGNUM) (const_int 0))
	      (plus:SI
	       (match_operand:SI 1 "register_operand" "r")
	       (match_operand:SI 2 "register_operand" "r")))))]
  ""
  "adc\\t%w0, %w1, %w2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "SI")]
)
;; Alternative association of add-with-carry: (reg1 + reg2) + C.
;; Combine does not canonicalize these, so each ordering needs a pattern.
(define_insn "*add<mode>3_carryin_alt1"
  [(set
    (match_operand:GPI 0 "register_operand" "=r")
    (plus:GPI (plus:GPI
	       (match_operand:GPI 1 "register_operand" "r")
	       (match_operand:GPI 2 "register_operand" "r"))
	      (geu:GPI (reg:CC CC_REGNUM) (const_int 0))))]
  ""
  "adc\\t%<w>0, %<w>1, %<w>2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "<MODE>")]
)
;; zero_extend version of above
;; zero_extend version of the (reg1 + reg2) + C carry-in ordering.
(define_insn "*addsi3_carryin_alt1_uxtw"
  [(set
    (match_operand:DI 0 "register_operand" "=r")
    (zero_extend:DI
     (plus:SI (plus:SI
	       (match_operand:SI 1 "register_operand" "r")
	       (match_operand:SI 2 "register_operand" "r"))
	      (geu:SI (reg:CC CC_REGNUM) (const_int 0)))))]
  ""
  "adc\\t%w0, %w1, %w2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "SI")]
)
;; Alternative association of add-with-carry: (C + reg1) + reg2.
(define_insn "*add<mode>3_carryin_alt2"
  [(set
    (match_operand:GPI 0 "register_operand" "=r")
    (plus:GPI (plus:GPI
	       (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
	       (match_operand:GPI 1 "register_operand" "r"))
	      (match_operand:GPI 2 "register_operand" "r")))]
  ""
  "adc\\t%<w>0, %<w>1, %<w>2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "<MODE>")]
)
;; zero_extend version of above
;; zero_extend version of the (C + reg1) + reg2 carry-in ordering.
(define_insn "*addsi3_carryin_alt2_uxtw"
  [(set
    (match_operand:DI 0 "register_operand" "=r")
    (zero_extend:DI
     (plus:SI (plus:SI
	       (geu:SI (reg:CC CC_REGNUM) (const_int 0))
	       (match_operand:SI 1 "register_operand" "r"))
	      (match_operand:SI 2 "register_operand" "r"))))]
  ""
  "adc\\t%w0, %w1, %w2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "SI")]
)
;; Alternative association of add-with-carry: (C + reg2) + reg1
;; (operand numbers swapped relative to alt2).
(define_insn "*add<mode>3_carryin_alt3"
  [(set
    (match_operand:GPI 0 "register_operand" "=r")
    (plus:GPI (plus:GPI
	       (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
	       (match_operand:GPI 2 "register_operand" "r"))
	      (match_operand:GPI 1 "register_operand" "r")))]
  ""
  "adc\\t%<w>0, %<w>1, %<w>2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "<MODE>")]
)
;; zero_extend version of above
;; zero_extend version of the (C + reg2) + reg1 carry-in ordering.
(define_insn "*addsi3_carryin_alt3_uxtw"
  [(set
    (match_operand:DI 0 "register_operand" "=r")
    (zero_extend:DI
     (plus:SI (plus:SI
	       (geu:SI (reg:CC CC_REGNUM) (const_int 0))
	       (match_operand:SI 2 "register_operand" "r"))
	      (match_operand:SI 1 "register_operand" "r"))))]
  ""
  "adc\\t%w0, %w1, %w2"
  [(set_attr "v8type" "adc")
   (set_attr "mode" "SI")]
)
(define_insn "*add_uxt