;; Machine description for RISC-V for GNU compiler.
;; Copyright (C) 2011-2024 Free Software Foundation, Inc.
;; Contributed by Andrew Waterman (andrew@sifive.com).
;; Based on MIPS target for GNU compiler.
;; This file is part of GCC.
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
;; Keep this list and the one above riscv_print_operand in sync.
;; The special asm out single letter directives following a '%' are:
;; h -- Print the high-part relocation associated with OP, after stripping
;; any outermost HIGH.
;; R -- Print the low-part relocation associated with OP.
;; C -- Print the integer branch condition for comparison OP.
;; A -- Print the atomic operation suffix for memory model OP.
;; F -- Print a FENCE if the memory model requires a release.
;; z -- Print x0 if OP is zero, otherwise print OP normally.
;; i -- Print i if the operand is not a register.
;; S -- Print shift-index of single-bit mask OP.
;; T -- Print shift-index of inverted single-bit mask OP.
;; ~ -- Print w if TARGET_64BIT is true; otherwise print nothing.
;; Non-volatile unspecs used in RTL patterns; each name becomes a value
;; of "enum unspec" in the generated insn-constants.h.
(define_c_enum "unspec" [
;; Override return address for exception handling.
UNSPEC_EH_RETURN
;; Symbolic accesses. The order of this list must match that of
;; enum riscv_symbol_type in riscv-protos.h.
UNSPEC_ADDRESS_FIRST
UNSPEC_FORCE_FOR_MEM
UNSPEC_PCREL
UNSPEC_LOAD_GOT
UNSPEC_TLS
UNSPEC_TLS_LE
UNSPEC_TLS_IE
UNSPEC_TLS_GD
UNSPEC_TLSDESC
;; High part of PC-relative address.
UNSPEC_AUIPC
;; Floating-point unspecs.
UNSPEC_FLT_QUIET
UNSPEC_FLE_QUIET
UNSPEC_COPYSIGN
UNSPEC_FMV_X_W
UNSPEC_FMVH_X_D
UNSPEC_RINT
UNSPEC_ROUND
UNSPEC_FLOOR
UNSPEC_CEIL
UNSPEC_BTRUNC
UNSPEC_ROUNDEVEN
UNSPEC_NEARBYINT
UNSPEC_LRINT
UNSPEC_FMIN
UNSPEC_FMAX
UNSPEC_FMINM
UNSPEC_FMAXM
UNSPEC_FCLASS
;; Stack tie
UNSPEC_TIE
;; OR-COMBINE
UNSPEC_ORC_B
;; Zbc unspecs
UNSPEC_CLMUL
UNSPEC_CLMULH
UNSPEC_CLMULR
;; the calling convention of callee
UNSPEC_CALLEE_CC
;; String unspecs
UNSPEC_STRLEN
;; Workaround for HFmode and BFmode without hardware extension
UNSPEC_FMV_FP16_X
;; XTheadFmv moves
UNSPEC_XTHEADFMV
UNSPEC_XTHEADFMV_HW
])
;; Volatile unspecs: operations with side effects that must not be
;; deleted, reordered across barriers, or CSE'd.
(define_c_enum "unspecv" [
;; Register save and restore.
UNSPECV_GPR_SAVE
UNSPECV_GPR_RESTORE
;; Floating-point unspecs.
UNSPECV_FRCSR
UNSPECV_FSCSR
UNSPECV_FRFLAGS
UNSPECV_FSFLAGS
UNSPECV_FSNVSNAN
;; Interrupt handler instructions.
UNSPECV_MRET
UNSPECV_SRET
UNSPECV_URET
;; Blockage and synchronization.
UNSPECV_BLOCKAGE
UNSPECV_FENCE
UNSPECV_FENCE_I
;; Stack Smash Protector
;; NOTE(review): these two keep an UNSPEC_ (not UNSPECV_) prefix even
;; though they live in the volatile enum; renaming would require
;; updating every reference elsewhere in the backend.
UNSPEC_SSP_SET
UNSPEC_SSP_TEST
;; CMO instructions.
UNSPECV_CLEAN
UNSPECV_FLUSH
UNSPECV_INVAL
UNSPECV_ZERO
UNSPECV_PREI
;; Zihintpause unspec
UNSPECV_PAUSE
;; XTheadInt unspec
UNSPECV_XTHEADINT_PUSH
UNSPECV_XTHEADINT_POP
])
;; Named hard-register numbers and expander return-style codes used by
;; the patterns and C code in this backend.
(define_constants
[(RETURN_ADDR_REGNUM 1)
(SP_REGNUM 2)
(GP_REGNUM 3)
(TP_REGNUM 4)
(T0_REGNUM 5)
(T1_REGNUM 6)
(S0_REGNUM 8)
(S1_REGNUM 9)
(A0_REGNUM 10)
(A1_REGNUM 11)
(S2_REGNUM 18)
(S3_REGNUM 19)
(S4_REGNUM 20)
(S5_REGNUM 21)
(S6_REGNUM 22)
(S7_REGNUM 23)
(S8_REGNUM 24)
(S9_REGNUM 25)
(S10_REGNUM 26)
(S11_REGNUM 27)
;; Kinds of function return (not register numbers).
(NORMAL_RETURN 0)
(SIBCALL_RETURN 1)
(EXCEPTION_RETURN 2)
;; Fixed register numbers for the vector state CSRs.
(VL_REGNUM 66)
(VTYPE_REGNUM 67)
(VXRM_REGNUM 68)
(FRM_REGNUM 69)
])
(include "predicates.md")
(include "constraints.md")
(include "iterators.md")
;; ....................
;;
;; Attributes
;;
;; ....................
;; Whether an insn accesses the GOT, and how; used below to derive the
;; "type" and "length" attributes for GOT loads.
(define_attr "got" "unset,xgot_high,load"
(const_string "unset"))
;; Classification of moves, extensions and truncations. Most values
;; are as for "type" (see below) but there are also the following
;; move-specific values:
;;
;; andi a single ANDI instruction
;; shift_shift a shift left followed by a shift right
;;
;; This attribute is used to determine the instruction's length and
;; scheduling type. For doubleword moves, the attribute always describes
;; the split instructions; in some cases, it is more appropriate for the
;; scheduling type to be "multi" instead.
;; See the block comment above: values mirror "type" plus the
;; move-specific andi / shift_shift / rdvlenb cases.
(define_attr "move_type"
"unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
const,logical,arith,andi,shift_shift,rdvlenb"
(const_string "unknown"))
;; Main data type used by the insn
;; Main data type used by the insn.  Besides the scalar modes, this
;; lists every RVV mode (RVVM<k>/RVVMF<k> presumably encode the LMUL
;; register-group size, x<n> the tuple/segment count — TODO confirm
;; against riscv-modes.def) and the fixed-length VLS vector modes.
(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,HF,BF,SF,DF,TF,
RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,
RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,
RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,
RVVM8BF,RVVM4BF,RVVM2BF,RVVM1BF,RVVMF2BF,RVVMF4BF,
RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,
RVVM8SI,RVVM4SI,RVVM2SI,RVVM1SI,RVVMF2SI,
RVVM8SF,RVVM4SF,RVVM2SF,RVVM1SF,RVVMF2SF,
RVVM8DI,RVVM4DI,RVVM2DI,RVVM1DI,
RVVM8DF,RVVM4DF,RVVM2DF,RVVM1DF,
RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,
RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,
RVVM1x6QI,RVVMF2x6QI,RVVMF4x6QI,RVVMF8x6QI,
RVVM1x5QI,RVVMF2x5QI,RVVMF4x5QI,RVVMF8x5QI,
RVVM2x4QI,RVVM1x4QI,RVVMF2x4QI,RVVMF4x4QI,RVVMF8x4QI,
RVVM2x3QI,RVVM1x3QI,RVVMF2x3QI,RVVMF4x3QI,RVVMF8x3QI,
RVVM4x2QI,RVVM2x2QI,RVVM1x2QI,RVVMF2x2QI,RVVMF4x2QI,RVVMF8x2QI,
RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,
RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,
RVVM1x6HI,RVVMF2x6HI,RVVMF4x6HI,
RVVM1x5HI,RVVMF2x5HI,RVVMF4x5HI,
RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,
RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,
RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,
RVVM1x8BF,RVVMF2x8BF,RVVMF4x8BF,RVVM1x7BF,RVVMF2x7BF,
RVVMF4x7BF,RVVM1x6BF,RVVMF2x6BF,RVVMF4x6BF,RVVM1x5BF,
RVVMF2x5BF,RVVMF4x5BF,RVVM2x4BF,RVVM1x4BF,RVVMF2x4BF,
RVVMF4x4BF,RVVM2x3BF,RVVM1x3BF,RVVMF2x3BF,RVVMF4x3BF,
RVVM4x2BF,RVVM2x2BF,RVVM1x2BF,RVVMF2x2BF,RVVMF4x2BF,
RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,RVVM1x7HF,RVVMF2x7HF,
RVVMF4x7HF,RVVM1x6HF,RVVMF2x6HF,RVVMF4x6HF,RVVM1x5HF,
RVVMF2x5HF,RVVMF4x5HF,RVVM2x4HF,RVVM1x4HF,RVVMF2x4HF,
RVVMF4x4HF,RVVM2x3HF,RVVM1x3HF,RVVMF2x3HF,RVVMF4x3HF,
RVVM4x2HF,RVVM2x2HF,RVVM1x2HF,RVVMF2x2HF,RVVMF4x2HF,
RVVM1x8SI,RVVMF2x8SI,
RVVM1x7SI,RVVMF2x7SI,
RVVM1x6SI,RVVMF2x6SI,
RVVM1x5SI,RVVMF2x5SI,
RVVM2x4SI,RVVM1x4SI,RVVMF2x4SI,
RVVM2x3SI,RVVM1x3SI,RVVMF2x3SI,
RVVM4x2SI,RVVM2x2SI,RVVM1x2SI,RVVMF2x2SI,
RVVM1x8SF,RVVMF2x8SF,RVVM1x7SF,RVVMF2x7SF,
RVVM1x6SF,RVVMF2x6SF,RVVM1x5SF,RVVMF2x5SF,
RVVM2x4SF,RVVM1x4SF,RVVMF2x4SF,RVVM2x3SF,
RVVM1x3SF,RVVMF2x3SF,RVVM4x2SF,RVVM2x2SF,
RVVM1x2SF,RVVMF2x2SF,
RVVM1x8DI,RVVM1x7DI,RVVM1x6DI,RVVM1x5DI,
RVVM2x4DI,RVVM1x4DI,RVVM2x3DI,RVVM1x3DI,
RVVM4x2DI,RVVM2x2DI,RVVM1x2DI,RVVM1x8DF,
RVVM1x7DF,RVVM1x6DF,RVVM1x5DF,RVVM2x4DF,
RVVM1x4DF,RVVM2x3DF,RVVM1x3DF,RVVM4x2DF,
RVVM2x2DF,RVVM1x2DF,
V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,
V1HI,V2HI,V4HI,V8HI,V16HI,V32HI,V64HI,V128HI,V256HI,V512HI,V1024HI,V2048HI,
V1SI,V2SI,V4SI,V8SI,V16SI,V32SI,V64SI,V128SI,V256SI,V512SI,V1024SI,
V1DI,V2DI,V4DI,V8DI,V16DI,V32DI,V64DI,V128DI,V256DI,V512DI,
V1HF,V2HF,V4HF,V8HF,V16HF,V32HF,V64HF,V128HF,V256HF,V512HF,V1024HF,V2048HF,
V1SF,V2SF,V4SF,V8SF,V16SF,V32SF,V64SF,V128SF,V256SF,V512SF,V1024SF,
V1DF,V2DF,V4DF,V8DF,V16DF,V32DF,V64DF,V128DF,V256DF,V512DF,
V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI"
(const_string "unknown"))
;; True if the main data type is twice the size of a word.
;; True if the main data type is twice the size of a word:
;; DI/DF on RV32, TI/TF on RV64.  Such values need two GPRs and their
;; moves are usually split (see "type" and "length" below).
(define_attr "dword_mode" "no,yes"
(cond [(and (eq_attr "mode" "DI,DF")
(eq (symbol_ref "TARGET_64BIT") (const_int 0)))
(const_string "yes")
(and (eq_attr "mode" "TI,TF")
(ne (symbol_ref "TARGET_64BIT") (const_int 0)))
(const_string "yes")]
(const_string "no")))
;; ISA attributes.
;; Which ISA extension an insn (or alternative) requires; consumed by
;; "ext_enabled" below.
(define_attr "ext" "base,f,d,vector"
(const_string "base"))
;; True if the extension is enabled.
;; True if the extension named by "ext" is enabled for this compilation:
;; f -> TARGET_HARD_FLOAT, d -> TARGET_DOUBLE_FLOAT, vector -> TARGET_VECTOR.
;; Feeds the "enabled" attribute, which disables mismatched alternatives.
(define_attr "ext_enabled" "no,yes"
(cond [(eq_attr "ext" "base")
(const_string "yes")
(and (eq_attr "ext" "f")
(match_test "TARGET_HARD_FLOAT"))
(const_string "yes")
(and (eq_attr "ext" "d")
(match_test "TARGET_DOUBLE_FLOAT"))
(const_string "yes")
(and (eq_attr "ext" "vector")
(match_test "TARGET_VECTOR"))
(const_string "yes")
]
(const_string "no")))
;; Classification of each insn.
;; branch conditional branch
;; jump unconditional direct jump
;; jalr unconditional indirect jump
;; ret various returns, no arguments
;; call unconditional call
;; load load instruction(s)
;; fpload floating point load
;; store store instruction(s)
;; fpstore floating point store
;; mtc transfer to coprocessor
;; mfc transfer from coprocessor
;; const load constant
;; arith integer arithmetic instructions
;; logical integer logical instructions
;; shift integer shift instructions
;; slt set less than instructions
;; imul integer multiply
;; idiv integer divide
;; move integer register move (addi rd, rs1, 0)
;; fmove floating point register move
;; fadd floating point add/subtract
;; fmul floating point multiply
;; fmadd floating point multiply-add
;; fdiv floating point divide
;; fcmp floating point compare
;; fcvt floating point convert
;; fcvt_i2f integer to floating point convert
;; fcvt_f2i floating point to integer convert
;; fsqrt floating point square root
;; multi multiword sequence (or user asm statements)
;; auipc integer addition to PC
;; sfb_alu SFB ALU instruction
;; nop no operation
;; trap trap instruction
;; ghost an instruction that produces no real code
;; bitmanip bit manipulation instructions
;; clmul clmul, clmulh, clmulr
;; rotate rotation instructions
;; atomic atomic instructions
;; condmove conditional moves
;; crypto cryptography instructions
;; mvpair zc move pair instructions
;; zicond zicond instructions
;; Classification of RVV instructions which will be added to each RVV .md pattern and used by scheduler.
;; rdvlenb vector byte length vlenb csrr read
;; rdvl vector length vl csrr read
;; wrvxrm vector fixed-point rounding mode write
;; wrfrm vector floating-point rounding mode write
;; vsetvl vector configuration-setting instructions
;; 7. Vector Loads and Stores
;; vlde vector unit-stride load instructions
;; vste vector unit-stride store instructions
;; vldm vector unit-stride mask load instructions
;; vstm vector unit-stride mask store instructions
;; vlds vector strided load instructions
;; vsts vector strided store instructions
;; vldux vector unordered indexed load instructions
;; vldox vector ordered indexed load instructions
;; vstux vector unordered indexed store instructions
;; vstox vector ordered indexed store instructions
;; vldff vector unit-stride fault-only-first load instructions
;; vldr vector whole register load instructions
;; vstr vector whole register store instructions
;; vlsegde vector segment unit-stride load instructions
;; vssegte vector segment unit-stride store instructions
;; vlsegds vector segment strided load instructions
;; vssegts vector segment strided store instructions
;; vlsegdux vector segment unordered indexed load instructions
;; vlsegdox vector segment ordered indexed load instructions
;; vssegtux vector segment unordered indexed store instructions
;; vssegtox vector segment ordered indexed store instructions
;; vlsegdff vector segment unit-stride fault-only-first load instructions
;; 11. Vector integer arithmetic instructions
;; vialu vector single-width integer add and subtract and logical instructions
;; viwalu vector widening integer add/subtract
;; vext vector integer extension
;; vicalu vector arithmetic with carry or borrow instructions
;; vshift vector single-width bit shift instructions
;; vnshift vector narrowing integer shift instructions
;; viminmax vector integer min/max instructions
;; vicmp vector integer comparison instructions
;; vimul vector single-width integer multiply instructions
;; vidiv vector single-width integer divide instructions
;; viwmul vector widening integer multiply instructions
;; vimuladd vector single-width integer multiply-add instructions
;; viwmuladd vector widening integer multiply-add instructions
;; vimerge vector integer merge instructions
;; vimov vector integer move vector instructions
;; 12. Vector fixed-point arithmetic instructions
;; vsalu vector single-width saturating add and subtract and logical instructions
;; vaalu vector single-width averaging add and subtract and logical instructions
;; vsmul vector single-width fractional multiply with rounding and saturation instructions
;; vsshift vector single-width scaling shift instructions
;; vnclip vector narrowing fixed-point clip instructions
;; 13. Vector floating-point instructions
;; vfalu vector single-width floating-point add/subtract instructions
;; vfwalu vector widening floating-point add/subtract instructions
;; vfmul vector single-width floating-point multiply instructions
;; vfdiv vector single-width floating-point divide instructions
;; vfwmul vector widening floating-point multiply instructions
;; vfmuladd vector single-width floating-point multiply-add instructions
;; vfwmuladd vector widening floating-point multiply-add instructions
;; vfsqrt vector floating-point square-root instructions
;; vfrecp vector floating-point reciprocal square-root instructions
;; vfminmax vector floating-point min/max instructions
;; vfcmp vector floating-point comparison instructions
;; vfsgnj vector floating-point sign-injection instructions
;; vfclass vector floating-point classify instruction
;; vfmerge vector floating-point merge instruction
;; vfmov vector floating-point move instruction
;; vfcvtitof vector single-width integer to floating-point instruction
;; vfcvtftoi vector single-width floating-point to integer instruction
;; vfwcvtitof vector widening integer to floating-point instruction
;; vfwcvtftoi vector widening floating-point to integer instruction
;; vfwcvtftof vector widening floating-point to floating-point instruction
;; vfncvtitof vector narrowing integer to floating-point instruction
;; vfncvtftoi vector narrowing floating-point to integer instruction
;; vfncvtftof vector narrowing floating-point to floating-point instruction
;; 14. Vector reduction operations
;; vired vector single-width integer reduction instructions
;; viwred vector widening integer reduction instructions
;; vfredu vector single-width floating-point un-ordered reduction instruction
;; vfredo vector single-width floating-point ordered reduction instruction
;; vfwredu vector widening floating-point un-ordered reduction instruction
;; vfwredo vector widening floating-point ordered reduction instruction
;; 15. Vector mask instructions
;; vmalu vector mask-register logical instructions
;; vmpop vector mask population count
;; vmffs vector find-first-set mask bit
;; vmsfs vector set mask bit
;; vmiota vector iota
;; vmidx vector element index instruction
;; 16. Vector permutation instructions
;; vimovvx integer scalar move instructions
;; vimovxv integer scalar move instructions
;; vfmovvf floating-point scalar move instructions
;; vfmovfv floating-point scalar move instructions
;; vslideup vector slide instructions
;; vslidedown vector slide instructions
;; vislide1up vector slide instructions
;; vislide1down vector slide instructions
;; vfslide1up vector slide instructions
;; vfslide1down vector slide instructions
;; vgather vector register gather instructions
;; vcompress vector compress instruction
;; vmov whole vector register move
;; vector unknown vector instruction
;; 17. Crypto Vector instructions
;; vandn crypto vector bitwise and-not instructions
;; vbrev crypto vector reverse bits in elements instructions
;; vbrev8 crypto vector reverse bits in bytes instructions
;; vrev8 crypto vector reverse bytes instructions
;; vclz crypto vector count leading Zeros instructions
;; vctz crypto vector count trailing zeros instructions
;; vrol crypto vector rotate left instructions
;; vror crypto vector rotate right instructions
;; vwsll crypto vector widening shift left logical instructions
;; vclmul crypto vector carry-less multiply - return low half instructions
;; vclmulh crypto vector carry-less multiply - return high half instructions
;; vghsh crypto vector add-multiply over GHASH Galois-Field instructions
;; vgmul crypto vector multiply over GHASH Galois-Field instructions
;; vaesef crypto vector AES final-round encryption instructions
;; vaesem crypto vector AES middle-round encryption instructions
;; vaesdf crypto vector AES final-round decryption instructions
;; vaesdm crypto vector AES middle-round decryption instructions
;; vaeskf1 crypto vector AES-128 Forward KeySchedule generation instructions
;; vaeskf2 crypto vector AES-256 Forward KeySchedule generation instructions
;; vaesz crypto vector AES round zero encryption/decryption instructions
;; vsha2ms crypto vector SHA-2 message schedule instructions
;; vsha2ch crypto vector SHA-2 two rounds of compression instructions
;; vsha2cl crypto vector SHA-2 two rounds of compression instructions
;; vsm4k crypto vector SM4 KeyExpansion instructions
;; vsm4r crypto vector SM4 Rounds instructions
;; vsm3me crypto vector SM3 Message Expansion instructions
;; vsm3c crypto vector SM3 Compression instructions
;; 18. Vector BF16 instructions
;; vfncvtbf16 vector narrowing single floating-point to brain floating-point instruction
;; vfwcvtbf16 vector widening brain floating-point to single floating-point instruction
;; vfwmaccbf16 vector BF16 widening multiply-accumulate
;; Insn classification used by the schedulers; the value meanings are
;; documented in the long comment block above.  The default is derived
;; from the "got" and "move_type" attributes where those are set.
(define_attr "type"
"unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
fmadd,fdiv,fcmp,fcvt,fcvt_i2f,fcvt_f2i,fsqrt,multi,auipc,sfb_alu,nop,trap,
ghost,bitmanip,rotate,clmul,min,max,minu,maxu,clz,ctz,cpop,
atomic,condmove,crypto,mvpair,zicond,rdvlenb,rdvl,wrvxrm,wrfrm,
rdfrm,vsetvl,vsetvl_pre,vlde,vste,vldm,vstm,vlds,vsts,
vldux,vldox,vstux,vstox,vldff,vldr,vstr,
vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,
vsalu,vaalu,vsmul,vsshift,vnclip,
vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,
vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,
vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,
vired,viwred,vfredu,vfredo,vfwredu,vfwredo,
vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16"
(cond [(eq_attr "got" "load") (const_string "load")
;; If a doubleword move uses these expensive instructions,
;; it is usually better to schedule them in the same way
;; as the singleword form, rather than as "multi".
(eq_attr "move_type" "load") (const_string "load")
(eq_attr "move_type" "fpload") (const_string "fpload")
(eq_attr "move_type" "store") (const_string "store")
(eq_attr "move_type" "fpstore") (const_string "fpstore")
(eq_attr "move_type" "mtc") (const_string "mtc")
(eq_attr "move_type" "mfc") (const_string "mfc")
;; These types of move are always single insns.
(eq_attr "move_type" "fmove") (const_string "fmove")
(eq_attr "move_type" "arith") (const_string "arith")
(eq_attr "move_type" "logical") (const_string "logical")
(eq_attr "move_type" "andi") (const_string "logical")
;; These types of move are always split.
(eq_attr "move_type" "shift_shift")
(const_string "multi")
;; These types of move are split for doubleword modes only.
(and (eq_attr "move_type" "move,const")
(eq_attr "dword_mode" "yes"))
(const_string "multi")
(eq_attr "move_type" "move") (const_string "move")
(eq_attr "move_type" "const") (const_string "const")
(eq_attr "move_type" "rdvlenb") (const_string "rdvlenb")]
(const_string "unknown")))
;; True if the float point vector is disabled.
;; True if the FP vector insn must be disabled: FP16 vector operations
;; (and the QI-mode FP16<=>INT8 converts) require the Zvfh extension.
(define_attr "fp_vector_disabled" "no,yes"
(cond [
(and (eq_attr "type" "vfmov,vfalu,vfmul,vfdiv,
vfwalu,vfwmul,vfmuladd,vfwmuladd,
vfsqrt,vfrecp,vfminmax,vfsgnj,vfcmp,
vfclass,vfmerge,
vfncvtitof,vfwcvtftoi,vfcvtftoi,vfcvtitof,
vfredo,vfredu,vfwredo,vfwredu,
vfslide1up,vfslide1down")
(and (eq_attr "mode" "RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF")
(match_test "!TARGET_ZVFH")))
(const_string "yes")
;; The mode records as QI for the FP16 <=> INT8 instruction.
(and (eq_attr "type" "vfncvtftoi,vfwcvtitof")
(and (eq_attr "mode" "RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI")
(match_test "!TARGET_ZVFH")))
(const_string "yes")
]
(const_string "no")))
;; This attribute marks the alternatives not matching the constraints
;; described in spec as disabled.
;; Which vector spec an alternative violates: "thv" alternatives are
;; invalid under XTheadVector, "rvv" alternatives under standard RVV.
(define_attr "spec_restriction" "none,thv,rvv"
(const_string "none"))
;; True when the active target matches the restriction recorded in
;; "spec_restriction"; such alternatives are then turned off via "enabled".
(define_attr "spec_restriction_disabled" "no,yes"
(cond [(eq_attr "spec_restriction" "none")
(const_string "no")
(and (eq_attr "spec_restriction" "thv")
(match_test "TARGET_XTHEADVECTOR"))
(const_string "yes")
(and (eq_attr "spec_restriction" "rvv")
(match_test "TARGET_VECTOR && !TARGET_XTHEADVECTOR"))
(const_string "yes")
]
(const_string "no")))
;; Attribute to control enable or disable instructions.
;; Master enable: an insn/alternative is usable only if its extension is
;; enabled, it is not an FP16 vector op without Zvfh, and it does not
;; violate the active vector spec.
(define_attr "enabled" "no,yes"
(cond [
(eq_attr "ext_enabled" "no")
(const_string "no")
(eq_attr "fp_vector_disabled" "yes")
(const_string "no")
(eq_attr "spec_restriction_disabled" "yes")
(const_string "no")
]
(const_string "yes")))
;; Length of instruction in bytes.
;; Length of instruction in bytes.  The branch limits are slightly
;; inside +/-4 KiB and +/-1 MiB (4088/4092, 1048568/1048572) —
;; presumably slack for instruction size/alignment; TODO confirm.
(define_attr "length" ""
(cond [
;; Branches further than +/- 1 MiB require three instructions.
;; Branches further than +/- 4 KiB require two instructions.
(eq_attr "type" "branch")
(if_then_else (and (le (minus (match_dup 0) (pc))
(const_int 4088))
(le (minus (pc) (match_dup 0))
(const_int 4092)))
(const_int 4)
(if_then_else (and (le (minus (match_dup 0) (pc))
(const_int 1048568))
(le (minus (pc) (match_dup 0))
(const_int 1048572)))
(const_int 8)
(const_int 12)))
;; Jumps further than +/- 1 MiB require two instructions.
(eq_attr "type" "jump")
(if_then_else (and (le (minus (match_dup 0) (pc))
(const_int 1048568))
(le (minus (pc) (match_dup 0))
(const_int 1048572)))
(const_int 4)
(const_int 8))
;; Conservatively assume calls take two instructions (AUIPC + JALR).
;; The linker will opportunistically relax the sequence to JAL.
(eq_attr "type" "call") (const_int 8)
;; "Ghost" instructions occupy no space.
(eq_attr "type" "ghost") (const_int 0)
(eq_attr "got" "load") (const_int 8)
;; SHIFT_SHIFTs are decomposed into two separate instructions.
(eq_attr "move_type" "shift_shift")
(const_int 8)
;; Check for doubleword moves that are decomposed into two
;; instructions.
(and (eq_attr "move_type" "mtc,mfc,move")
(eq_attr "dword_mode" "yes"))
(const_int 8)
;; Doubleword CONST{,N} moves are split into two word
;; CONST{,N} moves.
(and (eq_attr "move_type" "const")
(eq_attr "dword_mode" "yes"))
(symbol_ref "riscv_split_const_insns (operands[1]) * 4")
;; Otherwise, constants, loads and stores are handled by external
;; routines.
(eq_attr "move_type" "load,fpload")
(symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
(eq_attr "move_type" "store,fpstore")
(symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
] (const_int 4)))
;; Is copying of this instruction disallowed?
;; Is copying of this instruction disallowed (e.g. for insns that must
;; stay unique in the stream)?
(define_attr "cannot_copy" "no,yes" (const_string "no"))
;; Microarchitectures we know how to tune for.
;; Keep this in sync with enum riscv_microarchitecture.
;; Microarchitecture being tuned for, taken at run time from the
;; riscv_microarchitecture global (set by -mtune handling).
(define_attr "tune"
"generic,sifive_7,sifive_p400,sifive_p600,xiangshan,generic_ooo"
(const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
;; Describe a user's asm statement.
;; User asm statements get type "multi": unknown contents, conservatively
;; treated as a multi-instruction sequence.
(define_asm_attributes
[(set_attr "type" "multi")])
;; Ghost instructions produce no real code and introduce no hazards.
;; They exist purely to express an effect on dataflow.
;; Ghost instructions consume no functional units and zero cycles.
(define_insn_reservation "ghost" 0
(eq_attr "type" "ghost")
"nothing")
;;
;; ....................
;;
;; ADDITION
;;
;; ....................
;;
;; Scalar FP addition over the ANYF mode iterator (SF/DF/HF as enabled).
;; <fmt> supplies the s/d/h format suffix and <UNITMODE> the scalar FP
;; mode for the "mode" attribute.  (The extraction had stripped the
;; angle-bracket iterator tokens; restored here.)
(define_insn "add<mode>3"
  [(set (match_operand:ANYF 0 "register_operand" "=f")
	(plus:ANYF (match_operand:ANYF 1 "register_operand" " f")
		   (match_operand:ANYF 2 "register_operand" " f")))]
  "TARGET_HARD_FLOAT || TARGET_ZFINX"
  "fadd.<fmt>\t%0,%1,%2"
  [(set_attr "type" "fadd")
   (set_attr "mode" "<UNITMODE>")])
;; SImode add.  %i2 prints "i" when operand 2 is an immediate (making
;; add -> addi) and %~ appends "w" on RV64, per the operand-letter
;; comment at the top of this file.
(define_insn "*addsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_operand:SI 1 "register_operand" " r,r")
(match_operand:SI 2 "arith_operand" " r,I")))]
""
"add%i2%~\t%0,%1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;; SImode add expander.  On RV64, compute in DImode via addsi3_extended
;; (i.e. addw) and expose the result as a lowpart subreg marked
;; sign-promoted, so later passes can elide redundant sign extensions.
;; On RV32 the pattern above matches directly.
(define_expand "addsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_operand:SI 1 "register_operand" " r,r")
(match_operand:SI 2 "arith_operand" " r,I")))]
""
{
if (TARGET_64BIT)
{
rtx t = gen_reg_rtx (DImode);
emit_insn (gen_addsi3_extended (t, operands[1], operands[2]));
t = gen_lowpart (SImode, t);
;; Record that the DImode value is the sign extension of its lowpart.
SUBREG_PROMOTED_VAR_P (t) = 1;
SUBREG_PROMOTED_SET (t, SRP_SIGNED);
emit_move_insn (operands[0], t);
DONE;
}
})
;; DImode add (RV64 only); %i2 selects add vs addi.
(define_insn "adddi3"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(plus:DI (match_operand:DI 1 "register_operand" " r,r")
(match_operand:DI 2 "arith_operand" " r,I")))]
"TARGET_64BIT"
"add%i2\t%0,%1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "DI")])
;; Special case of adding a reg and constant if latter is sum of two S12
;; values (each in the range -2048 to 2047).  Avoid materializing the const and fuse
;; into the add (with an additional add for 2nd value). Makes a 3 insn
;; sequence into 2 insn.
;; Add of a constant that is the sum of two signed-12-bit values,
;; split into two addi-style adds (see the comment above).  Restored
;; the stripped <mode> iterator token in the name and <P:MODE> in the
;; "mode" attribute; P iterates over the Pmode-sized integer modes.
(define_insn_and_split "*add<mode>3_const_sum_of_two_s12"
  [(set (match_operand:P 0 "register_operand" "=r,r")
	(plus:P (match_operand:P 1 "register_operand" " r,r")
		(match_operand:P 2 "const_two_s12" " MiG,r")))]
  "!riscv_reg_frame_related (operands[0])"
{
  /* operand matching MiG constraint is always meant to be split.  */
  if (which_alternative == 0)
    return "#";
  else
    return "add %0,%1,%2";
}
  ""
  [(set (match_dup 0)
	(plus:P (match_dup 1) (match_dup 3)))
   (set (match_dup 0)
	(plus:P (match_dup 0) (match_dup 4)))]
{
  int val = INTVAL (operands[2]);
  if (SUM_OF_TWO_S12_P (val))
    {
      /* Positive sum: peel off the largest S12 value first.  */
      operands[3] = GEN_INT (2047);
      operands[4] = GEN_INT (val - 2047);
    }
  else if (SUM_OF_TWO_S12_N (val))
    {
      /* Negative sum: peel off the most negative S12 value first.  */
      operands[3] = GEN_INT (-2048);
      operands[4] = GEN_INT (val + 2048);
    }
  else
    gcc_unreachable ();
}
  [(set_attr "type" "arith")
   (set_attr "mode" "<P:MODE>")])
;; Signed add with overflow check; branches to operand 3 on overflow.
;; RV64/SImode: perform the 32-bit add, then redo it in 64 bits on the
;; sign-extended inputs and branch if the sign-extended 32-bit result
;; differs from the 64-bit sum.  Otherwise: overflow occurred iff
;; (op2 < 0) != (result < op1).  (Stripped <mode>/<MODE> iterator
;; tokens restored throughout.)
(define_expand "addv<mode>4"
  [(set (match_operand:GPR 0 "register_operand" "=r,r")
	(plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
		  (match_operand:GPR 2 "arith_operand" " r,I")))
   (label_ref (match_operand 3 "" ""))]
  ""
{
  if (TARGET_64BIT && <MODE>mode == SImode)
    {
      rtx t3 = gen_reg_rtx (DImode);
      rtx t4 = gen_reg_rtx (DImode);
      rtx t5 = gen_reg_rtx (DImode);
      rtx t6 = gen_reg_rtx (DImode);

      riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
      if (GET_CODE (operands[1]) != CONST_INT)
	emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
      else
	t4 = operands[1];
      if (GET_CODE (operands[2]) != CONST_INT)
	emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
      else
	t5 = operands[2];
      emit_insn (gen_adddi3 (t3, t4, t5));
      emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));

      riscv_expand_conditional_branch (operands[3], NE, t6, t3);
    }
  else
    {
      rtx t3 = gen_reg_rtx (<MODE>mode);
      rtx t4 = gen_reg_rtx (<MODE>mode);

      emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));

      rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
      emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));

      rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[0], operands[1]);
      emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[0], operands[1]));

      riscv_expand_conditional_branch (operands[3], NE, t3, t4);
    }

  DONE;
})
;; Unsigned add with overflow (carry) check; branches to operand 3 on
;; overflow.  Carry occurred iff the result is unsigned-less-than an
;; input.  RV64/SImode compares the sign-extended result against the
;; sign-extended input.  (Stripped <mode>/<MODE> tokens restored.)
(define_expand "uaddv<mode>4"
  [(set (match_operand:GPR 0 "register_operand" "=r,r")
	(plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
		  (match_operand:GPR 2 "arith_operand" " r,I")))
   (label_ref (match_operand 3 "" ""))]
  ""
{
  if (TARGET_64BIT && <MODE>mode == SImode)
    {
      rtx t3 = gen_reg_rtx (DImode);
      rtx t4 = gen_reg_rtx (DImode);

      if (GET_CODE (operands[1]) != CONST_INT)
	emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
      else
	t3 = operands[1];
      riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
      emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));

      riscv_expand_conditional_branch (operands[3], LTU, t4, t3);
    }
  else
    {
      emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
      riscv_expand_conditional_branch (operands[3], LTU, operands[0],
				       operands[1]);
    }

  DONE;
})
;; 32-bit add whose sign-extended 64-bit result is wanted: addw/addiw.
(define_insn "addsi3_extended"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(sign_extend:DI
(plus:SI (match_operand:SI 1 "register_operand" " r,r")
(match_operand:SI 2 "arith_operand" " r,I"))))]
"TARGET_64BIT"
"add%i2w\t%0,%1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;; Same as addsi3_extended, matching the form combine produces: a DImode
;; add whose lowpart subreg is sign-extended.  addw only looks at the
;; low 32 bits, so the DImode inputs are fine.
(define_insn "*addsi3_extended2"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(sign_extend:DI
(match_operator:SI 3 "subreg_lowpart_operator"
[(plus:DI (match_operand:DI 1 "register_operand" " r,r")
(match_operand:DI 2 "arith_operand" " r,I"))])))]
"TARGET_64BIT"
"add%i2w\t%0,%1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;;
;; ....................
;;
;; SUBTRACTION
;;
;; ....................
;;
;; Scalar FP subtraction over ANYF; type "fadd" since FP add and sub
;; share functional units.  (Stripped <mode>/<fmt>/<UNITMODE> iterator
;; tokens restored.)
(define_insn "sub<mode>3"
  [(set (match_operand:ANYF 0 "register_operand" "=f")
	(minus:ANYF (match_operand:ANYF 1 "register_operand" " f")
		    (match_operand:ANYF 2 "register_operand" " f")))]
  "TARGET_HARD_FLOAT || TARGET_ZFINX"
  "fsub.<fmt>\t%0,%1,%2"
  [(set_attr "type" "fadd")
   (set_attr "mode" "<UNITMODE>")])
;; DImode subtract (RV64); %z1 prints x0 when operand 1 is zero.
(define_insn "subdi3"
[(set (match_operand:DI 0 "register_operand" "= r")
(minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
(match_operand:DI 2 "register_operand" " r")))]
"TARGET_64BIT"
"sub\t%0,%z1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "DI")])
;; SImode subtract; %~ appends "w" on RV64, %z1 prints x0 for zero.
(define_insn "*subsi3"
[(set (match_operand:SI 0 "register_operand" "= r")
(minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
(match_operand:SI 2 "register_operand" " r")))]
""
"sub%~\t%0,%z1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;; SImode subtract expander; same RV64 strategy as addsi3: compute via
;; subsi3_extended (subw) and mark the lowpart subreg sign-promoted.
(define_expand "subsi3"
[(set (match_operand:SI 0 "register_operand" "= r")
(minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
(match_operand:SI 2 "register_operand" " r")))]
""
{
if (TARGET_64BIT)
{
rtx t = gen_reg_rtx (DImode);
emit_insn (gen_subsi3_extended (t, operands[1], operands[2]));
t = gen_lowpart (SImode, t);
;; Record that the DImode value is the sign extension of its lowpart.
SUBREG_PROMOTED_VAR_P (t) = 1;
SUBREG_PROMOTED_SET (t, SRP_SIGNED);
emit_move_insn (operands[0], t);
DONE;
}
})
;; Signed subtract with overflow check; branches to operand 3 on
;; overflow.  RV64/SImode: redo the subtract in 64 bits on sign-extended
;; inputs and branch if it disagrees with the sign-extended 32-bit
;; result.  Otherwise: overflow occurred iff (op2 < 0) != (op1 < result).
;; (Stripped <mode>/<MODE> iterator tokens restored throughout.)
(define_expand "subv<mode>4"
  [(set (match_operand:GPR 0 "register_operand" "= r")
	(minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
		   (match_operand:GPR 2 "register_operand" " r")))
   (label_ref (match_operand 3 "" ""))]
  ""
{
  if (TARGET_64BIT && <MODE>mode == SImode)
    {
      rtx t3 = gen_reg_rtx (DImode);
      rtx t4 = gen_reg_rtx (DImode);
      rtx t5 = gen_reg_rtx (DImode);
      rtx t6 = gen_reg_rtx (DImode);

      riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
      if (GET_CODE (operands[1]) != CONST_INT)
	emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
      else
	t4 = operands[1];
      if (GET_CODE (operands[2]) != CONST_INT)
	emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
      else
	t5 = operands[2];
      emit_insn (gen_subdi3 (t3, t4, t5));
      emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));

      riscv_expand_conditional_branch (operands[3], NE, t6, t3);
    }
  else
    {
      rtx t3 = gen_reg_rtx (<MODE>mode);
      rtx t4 = gen_reg_rtx (<MODE>mode);

      emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));

      rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
      emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));

      rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[1], operands[0]);
      emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[1], operands[0]));

      riscv_expand_conditional_branch (operands[3], NE, t3, t4);
    }

  DONE;
})
;; Unsigned subtract with overflow (borrow) check; branches to operand 3
;; on overflow.  Borrow occurred iff op1 is unsigned-less-than the
;; result.  RV64/SImode compares sign-extended copies.  (Stripped
;; <mode>/<MODE> tokens restored.)
(define_expand "usubv<mode>4"
  [(set (match_operand:GPR 0 "register_operand" "= r")
	(minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
		   (match_operand:GPR 2 "register_operand" " r")))
   (label_ref (match_operand 3 "" ""))]
  ""
{
  if (TARGET_64BIT && <MODE>mode == SImode)
    {
      rtx t3 = gen_reg_rtx (DImode);
      rtx t4 = gen_reg_rtx (DImode);

      if (GET_CODE (operands[1]) != CONST_INT)
	emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
      else
	t3 = operands[1];
      riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
      emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));

      riscv_expand_conditional_branch (operands[3], LTU, t3, t4);
    }
  else
    {
      emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
      riscv_expand_conditional_branch (operands[3], LTU, operands[1],
				       operands[0]);
    }

  DONE;
})
;; 32-bit subtract on RV64 whose DImode destination holds the
;; sign-extended SImode difference ("subw" semantics).
(define_insn "subsi3_extended"
[(set (match_operand:DI 0 "register_operand" "= r")
(sign_extend:DI
(minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
(match_operand:SI 2 "register_operand" " r"))))]
"TARGET_64BIT"
"subw\t%0,%z1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;; Combine-generated variant: the subtraction is written in DImode with
;; the low part extracted via a lowpart subreg, then sign-extended —
;; still exactly what subw computes.
(define_insn "*subsi3_extended2"
[(set (match_operand:DI 0 "register_operand" "= r")
(sign_extend:DI
(match_operator:SI 3 "subreg_lowpart_operator"
[(minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
(match_operand:DI 2 "register_operand" " r"))])))]
"TARGET_64BIT"
"subw\t%0,%z1,%2"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;; 64-bit integer negation ("neg" is the sub-from-x0 pseudo-op).
(define_insn "negdi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "register_operand" " r")))]
"TARGET_64BIT"
"neg\t%0,%1"
[(set_attr "type" "arith")
(set_attr "mode" "DI")])
;; SImode negation insn; %~ appends "w" on RV64 (negw) per the operand
;; directives documented at the top of this file.
(define_insn "*negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" " r")))]
""
"neg%~\t%0,%1"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;; Expander: on RV64, negate in DImode via negsi2_extended and mark the
;; SImode lowpart subreg as sign-promoted so redundant extensions of the
;; result can be elided later.
(define_expand "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" " r")))]
""
{
if (TARGET_64BIT)
{
rtx t = gen_reg_rtx (DImode);
emit_insn (gen_negsi2_extended (t, operands[1]));
t = gen_lowpart (SImode, t);
SUBREG_PROMOTED_VAR_P (t) = 1;
SUBREG_PROMOTED_SET (t, SRP_SIGNED);
emit_move_insn (operands[0], t);
DONE;
}
})
;; SImode negation with the result sign-extended into a DImode register.
(define_insn "negsi2_extended"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI
(neg:SI (match_operand:SI 1 "register_operand" " r"))))]
"TARGET_64BIT"
"negw\t%0,%1"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;; Combine-generated lowpart-subreg form of the above.
(define_insn "*negsi2_extended2"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI
(match_operator:SI 2 "subreg_lowpart_operator"
[(neg:DI (match_operand:DI 1 "register_operand" " r"))])))]
"TARGET_64BIT"
"negw\t%0,%1"
[(set_attr "type" "arith")
(set_attr "mode" "SI")])
;;
;; ....................
;;
;; MULTIPLICATION
;;
;; ....................
;;
;; Floating-point multiply over the ANYF mode iterator.
;; NOTE(review): the stripped <...> placeholders here presumably carried
;; the format suffix (fmul.<fmt>) and mode attribute — confirm upstream.
(define_insn "mul3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(mult:ANYF (match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fmul.\t%0,%1,%2"
[(set_attr "type" "fmul")
(set_attr "mode" "")])
;; SImode multiply insn; %~ selects mulw on RV64.  Requires M or Zmmul.
(define_insn "*mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r")))]
"TARGET_ZMMUL || TARGET_MUL"
"mul%~\t%0,%1,%2"
[(set_attr "type" "imul")
(set_attr "mode" "SI")])
;; Expander: on RV64 multiply via mulsi3_extended and mark the lowpart
;; subreg sign-promoted (same promotion trick as negsi2 above).
(define_expand "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r")))]
"TARGET_ZMMUL || TARGET_MUL"
{
if (TARGET_64BIT)
{
rtx t = gen_reg_rtx (DImode);
emit_insn (gen_mulsi3_extended (t, operands[1], operands[2]));
t = gen_lowpart (SImode, t);
SUBREG_PROMOTED_VAR_P (t) = 1;
SUBREG_PROMOTED_SET (t, SRP_SIGNED);
emit_move_insn (operands[0], t);
DONE;
}
})
;; 64-bit low-part multiply.
(define_insn "muldi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (match_operand:DI 1 "register_operand" " r")
(match_operand:DI 2 "register_operand" " r")))]
"(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
"mul\t%0,%1,%2"
[(set_attr "type" "imul")
(set_attr "mode" "DI")])
;; Signed multiply with overflow check: operands[0] = operands[1] *
;; operands[2], branching to operands[3] on signed overflow.
(define_expand "mulv4"
[(set (match_operand:GPR 0 "register_operand" "=r")
(mult:GPR (match_operand:GPR 1 "register_operand" " r")
(match_operand:GPR 2 "register_operand" " r")))
(label_ref (match_operand 3 "" ""))]
"TARGET_ZMMUL || TARGET_MUL"
{
if (TARGET_64BIT && mode == SImode)
{
/* RV64/SImode: form the full 64-bit product of the sign-extended
operands; overflow iff sign-extending the 32-bit result does not
reproduce that full product.  */
rtx t3 = gen_reg_rtx (DImode);
rtx t4 = gen_reg_rtx (DImode);
rtx t5 = gen_reg_rtx (DImode);
rtx t6 = gen_reg_rtx (DImode);
if (GET_CODE (operands[1]) != CONST_INT)
emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
else
t4 = operands[1];
if (GET_CODE (operands[2]) != CONST_INT)
emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
else
t5 = operands[2];
emit_insn (gen_muldi3 (t3, t4, t5));
emit_move_insn (operands[0], gen_lowpart (SImode, t3));
emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
riscv_expand_conditional_branch (operands[3], NE, t6, t3);
}
else
{
/* Word-mode case: overflow iff the signed high part (mulh) differs
from the low part's sign bit broadcast across the word.  */
rtx hp = gen_reg_rtx (mode);
rtx lp = gen_reg_rtx (mode);
emit_insn (gen_smul3_highpart (hp, operands[1], operands[2]));
emit_insn (gen_mul3 (operands[0], operands[1], operands[2]));
riscv_emit_binary (ASHIFTRT, lp, operands[0],
GEN_INT (BITS_PER_WORD - 1));
riscv_expand_conditional_branch (operands[3], NE, hp, lp);
}
DONE;
})
;; Unsigned multiply with overflow check: branch to operands[3] when the
;; product does not fit in the mode.
(define_expand "umulv4"
[(set (match_operand:GPR 0 "register_operand" "=r")
(mult:GPR (match_operand:GPR 1 "register_operand" " r")
(match_operand:GPR 2 "register_operand" " r")))
(label_ref (match_operand 3 "" ""))]
"TARGET_ZMMUL || TARGET_MUL"
{
if (TARGET_64BIT && mode == SImode)
{
/* RV64/SImode: shift both 32-bit operands to the top of 64-bit regs
and use the unsigned high-part multiply, so t7 holds the full
64-bit product; overflow iff its upper 32 bits are nonzero.  */
rtx t3 = gen_reg_rtx (DImode);
rtx t4 = gen_reg_rtx (DImode);
rtx t5 = gen_reg_rtx (DImode);
rtx t6 = gen_reg_rtx (DImode);
rtx t7 = gen_reg_rtx (DImode);
rtx t8 = gen_reg_rtx (DImode);
if (GET_CODE (operands[1]) != CONST_INT)
emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
else
t3 = operands[1];
if (GET_CODE (operands[2]) != CONST_INT)
emit_insn (gen_extend_insn (t4, operands[2], DImode, SImode, 0));
else
t4 = operands[2];
emit_insn (gen_ashldi3 (t5, t3, GEN_INT (32)));
emit_insn (gen_ashldi3 (t6, t4, GEN_INT (32)));
emit_insn (gen_umuldi3_highpart (t7, t5, t6));
emit_move_insn (operands[0], gen_lowpart (SImode, t7));
emit_insn (gen_lshrdi3 (t8, t7, GEN_INT (32)));
riscv_expand_conditional_branch (operands[3], NE, t8, const0_rtx);
}
else
{
/* Word-mode case: overflow iff the unsigned high part (mulhu) of
the product is nonzero.  */
rtx hp = gen_reg_rtx (mode);
emit_insn (gen_umul3_highpart (hp, operands[1], operands[2]));
emit_insn (gen_mul3 (operands[0], operands[1], operands[2]));
riscv_expand_conditional_branch (operands[3], NE, hp, const0_rtx);
}
DONE;
})
;; 32-bit multiply on RV64 whose DImode destination holds the
;; sign-extended SImode product ("mulw").
(define_insn "mulsi3_extended"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI
(mult:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r"))))]
"(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
"mulw\t%0,%1,%2"
[(set_attr "type" "imul")
(set_attr "mode" "SI")])
;; Combine-generated lowpart-subreg form of the above.
(define_insn "*mulsi3_extended2"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI
(match_operator:SI 3 "subreg_lowpart_operator"
[(mult:DI (match_operand:DI 1 "register_operand" " r")
(match_operand:DI 2 "register_operand" " r"))])))]
"(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
"mulw\t%0,%1,%2"
[(set_attr "type" "imul")
(set_attr "mode" "SI")])
;;
;; ........................
;;
;; MULTIPLICATION HIGH-PART
;;
;; ........................
;;
;; DImode x DImode -> TImode widening multiply on RV64, synthesized from
;; a low-part mul plus a high-part mulh/mulhu (any_extend covers both
;; the signed and unsigned variants).
(define_expand "mulditi3"
[(set (match_operand:TI 0 "register_operand")
(mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
(any_extend:TI (match_operand:DI 2 "register_operand"))))]
"(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
{
rtx low = gen_reg_rtx (DImode);
emit_insn (gen_muldi3 (low, operands[1], operands[2]));
rtx high = gen_reg_rtx (DImode);
emit_insn (gen_muldi3_highpart (high, operands[1], operands[2]));
emit_move_insn (gen_lowpart (DImode, operands[0]), low);
emit_move_insn (gen_highpart (DImode, operands[0]), high);
DONE;
})
;; Upper 64 bits of the 128-bit product (mulh for signed, mulhu for
;; unsigned; the stripped <...> placeholder selected the u suffix).
(define_insn "muldi3_highpart"
[(set (match_operand:DI 0 "register_operand" "=r")
(truncate:DI
(lshiftrt:TI
(mult:TI (any_extend:TI
(match_operand:DI 1 "register_operand" " r"))
(any_extend:TI
(match_operand:DI 2 "register_operand" " r")))
(const_int 64))))]
"(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
"mulh\t%0,%1,%2"
[(set_attr "type" "imul")
(set_attr "mode" "DI")])
;; Mixed unsigned x signed widening multiply to TImode.
(define_expand "usmulditi3"
[(set (match_operand:TI 0 "register_operand")
(mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
(sign_extend:TI (match_operand:DI 2 "register_operand"))))]
"(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
{
rtx low = gen_reg_rtx (DImode);
emit_insn (gen_muldi3 (low, operands[1], operands[2]));
rtx high = gen_reg_rtx (DImode);
emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
emit_move_insn (gen_lowpart (DImode, operands[0]), low);
emit_move_insn (gen_highpart (DImode, operands[0]), high);
DONE;
})
;; mulhsu takes the signed operand first, hence the swapped %2,%1 order.
(define_insn "usmuldi3_highpart"
[(set (match_operand:DI 0 "register_operand" "=r")
(truncate:DI
(lshiftrt:TI
(mult:TI (zero_extend:TI
(match_operand:DI 1 "register_operand" "r"))
(sign_extend:TI
(match_operand:DI 2 "register_operand" " r")))
(const_int 64))))]
"(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
"mulhsu\t%0,%2,%1"
[(set_attr "type" "imul")
(set_attr "mode" "DI")])
;; RV32 counterpart of mulditi3: SImode x SImode -> DImode widening
;; multiply split into a low-part mul and a high-part mulh/mulhu, the
;; two halves written via riscv_subword.
(define_expand "mulsidi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (any_extend:DI
(match_operand:SI 1 "register_operand" " r"))
(any_extend:DI
(match_operand:SI 2 "register_operand" " r"))))]
"(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
{
rtx temp = gen_reg_rtx (SImode);
riscv_emit_binary (MULT, temp, operands[1], operands[2]);
emit_insn (gen_mulsi3_highpart (riscv_subword (operands[0], true),
operands[1], operands[2]));
emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
DONE;
})
;; Upper 32 bits of the 64-bit product on RV32.
(define_insn "mulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "=r")
(truncate:SI
(lshiftrt:DI
(mult:DI (any_extend:DI
(match_operand:SI 1 "register_operand" " r"))
(any_extend:DI
(match_operand:SI 2 "register_operand" " r")))
(const_int 32))))]
"(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
"mulh\t%0,%1,%2"
[(set_attr "type" "imul")
(set_attr "mode" "SI")])
;; Mixed unsigned x signed widening multiply on RV32.
(define_expand "usmulsidi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (zero_extend:DI
(match_operand:SI 1 "register_operand" " r"))
(sign_extend:DI
(match_operand:SI 2 "register_operand" " r"))))]
"(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
{
rtx temp = gen_reg_rtx (SImode);
riscv_emit_binary (MULT, temp, operands[1], operands[2]);
emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
operands[1], operands[2]));
emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
DONE;
})
;; mulhsu takes the signed operand first, hence the swapped %2,%1 order.
(define_insn "usmulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "=r")
(truncate:SI
(lshiftrt:DI
(mult:DI (zero_extend:DI
(match_operand:SI 1 "register_operand" " r"))
(sign_extend:DI
(match_operand:SI 2 "register_operand" " r")))
(const_int 32))))]
"(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
"mulhsu\t%0,%2,%1"
[(set_attr "type" "imul")
(set_attr "mode" "SI")])
;;
;; ....................
;;
;; DIVISION and REMAINDER
;;
;; ....................
;;
;; SImode div/divu/rem/remu insn over the any_div iterator; %i2 and %~
;; pick the immediate/word-op suffixes.  NOTE(review): the stripped <...>
;; placeholders presumably carried the optab/insn names here.
(define_insn "*si3"
[(set (match_operand:SI 0 "register_operand" "=r")
(any_div:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r")))]
"TARGET_DIV"
"%i2%~\t%0,%1,%2"
[(set_attr "type" "idiv")
(set_attr "mode" "SI")])
;; Expander: on RV64 divide in DImode via the _extended pattern and mark
;; the SImode lowpart subreg sign-promoted.
(define_expand "si3"
[(set (match_operand:SI 0 "register_operand" "=r")
(any_div:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r")))]
"TARGET_DIV"
{
if (TARGET_64BIT)
{
rtx t = gen_reg_rtx (DImode);
emit_insn (gen_si3_extended (t, operands[1], operands[2]));
t = gen_lowpart (SImode, t);
SUBREG_PROMOTED_VAR_P (t) = 1;
SUBREG_PROMOTED_SET (t, SRP_SIGNED);
emit_move_insn (operands[0], t);
DONE;
}
})
;; 64-bit div/divu/rem/remu.
(define_insn "di3"
[(set (match_operand:DI 0 "register_operand" "=r")
(any_div:DI (match_operand:DI 1 "register_operand" " r")
(match_operand:DI 2 "register_operand" " r")))]
"TARGET_DIV && TARGET_64BIT"
"%i2\t%0,%1,%2"
[(set_attr "type" "idiv")
(set_attr "mode" "DI")])
;; Combined divmod expander, used only where the tuning says a div plus
;; mul/sub is preferable to issuing separate div and rem instructions:
;; quotient = a / b; remainder = a - quotient * b.
(define_expand "divmod4"
[(parallel
[(set (match_operand:GPR 0 "register_operand")
(only_div:GPR (match_operand:GPR 1 "register_operand")
(match_operand:GPR 2 "register_operand")))
(set (match_operand:GPR 3 "register_operand")
(:GPR (match_dup 1) (match_dup 2)))])]
"TARGET_DIV && riscv_use_divmod_expander ()"
{
rtx tmp = gen_reg_rtx (mode);
emit_insn (gen_div3 (operands[0], operands[1], operands[2]));
emit_insn (gen_mul3 (tmp, operands[0], operands[2]));
emit_insn (gen_sub3 (operands[3], operands[1], tmp));
DONE;
})
;; 32-bit division on RV64 with sign-extended result (divw/remw etc.).
(define_insn "si3_extended"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI
(any_div:SI (match_operand:SI 1 "register_operand" " r")
(match_operand:SI 2 "register_operand" " r"))))]
"TARGET_DIV && TARGET_64BIT"
"%i2w\t%0,%1,%2"
[(set_attr "type" "idiv")
(set_attr "mode" "DI")])
;; Floating-point division; gated on TARGET_FDIV since fdiv/fsqrt can be
;; disabled independently of the rest of the FP ISA.
(define_insn "div3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(div:ANYF (match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
"fdiv.\t%0,%1,%2"
[(set_attr "type" "fdiv")
(set_attr "mode" "")])
;;
;; ....................
;;
;; SQUARE ROOT
;;
;; ....................
;; Floating-point square root, gated on TARGET_FDIV like fdiv.
(define_insn "sqrt2"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(sqrt:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
{
/* The format suffix after "fsqrt." was carried by a stripped <...>
placeholder — confirm against upstream riscv.md.  */
return "fsqrt.\t%0,%1";
}
[(set_attr "type" "fsqrt")
(set_attr "mode" "")])
;; Floating point multiply accumulate instructions.
;; The four named patterns map GCC's fma RTL onto the four RISC-V fused
;; ops; the starred variants match the negated forms combine produces,
;; valid only when signed zeros need not be honored.
;; a * b + c
(define_insn "fma4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")
(match_operand:ANYF 3 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fmadd.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;; a * b - c
(define_insn "fms4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")
(neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fmsub.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;; -a * b - c
(define_insn "fnms4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(fma:ANYF
(neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
(match_operand:ANYF 2 "register_operand" " f")
(neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fnmadd.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;; -a * b + c
(define_insn "fnma4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(fma:ANYF
(neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
(match_operand:ANYF 2 "register_operand" " f")
(match_operand:ANYF 3 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fnmsub.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;; -(-a * b - c), modulo signed zeros
(define_insn "*fma4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(neg:ANYF
(fma:ANYF
(neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
(match_operand:ANYF 2 "register_operand" " f")
(neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (mode)"
"fmadd.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;; -(-a * b + c), modulo signed zeros
(define_insn "*fms4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(neg:ANYF
(fma:ANYF
(neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
(match_operand:ANYF 2 "register_operand" " f")
(match_operand:ANYF 3 "register_operand" " f"))))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (mode)"
"fmsub.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;; -(a * b + c), modulo signed zeros
(define_insn "*fnms4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(neg:ANYF
(fma:ANYF
(match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")
(match_operand:ANYF 3 "register_operand" " f"))))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (mode)"
"fnmadd.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;; -(a * b - c), modulo signed zeros
(define_insn "*fnma4"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(neg:ANYF
(fma:ANYF
(match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")
(neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (mode)"
"fnmsub.\t%0,%1,%2,%3"
[(set_attr "type" "fmadd")
(set_attr "mode" "")])
;;
;; ....................
;;
;; SIGN INJECTION
;;
;; ....................
;; abs/neg/copysign all map to the fsgnj family of sign-injection ops.
(define_insn "abs2"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(abs:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fabs.\t%0,%1"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
;; copysign(x, y): magnitude of %1 with the sign of %2 (fsgnj).
(define_insn "copysign3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(unspec:ANYF [(match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")]
UNSPEC_COPYSIGN))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fsgnj.\t%0,%1,%2"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
(define_insn "neg2"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(neg:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fneg.\t%0,%1"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
;;
;; ....................
;;
;; MIN/MAX
;;
;; ....................
;; Zfa fminm/fmaxm: IEEE-754-2019 minimum/maximum semantics.  The
;; operands are wrapped in (use ...) inside the unspec so the patterns
;; stay opaque to the optimizers.
(define_insn "fminm3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
(use (match_operand:ANYF 2 "register_operand" " f"))]
UNSPEC_FMINM))]
"TARGET_HARD_FLOAT && TARGET_ZFA"
"fminm.\t%0,%1,%2"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
(define_insn "fmaxm3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
(use (match_operand:ANYF 2 "register_operand" " f"))]
UNSPEC_FMAXM))]
"TARGET_HARD_FLOAT && TARGET_ZFA"
"fmaxm.\t%0,%1,%2"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
;; fmin/fmax as unspecs, restricted to !HONOR_SNANS because the hardware
;; ops do not signal on sNaN inputs the way the C fmin/fmax must.
(define_insn "fmin3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
(use (match_operand:ANYF 2 "register_operand" " f"))]
UNSPEC_FMIN))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (mode)"
"fmin.\t%0,%1,%2"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
(define_insn "fmax3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
(use (match_operand:ANYF 2 "register_operand" " f"))]
UNSPEC_FMAX))]
"(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (mode)"
"fmax.\t%0,%1,%2"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
;; smin/smax RTL (no NaN guarantees required) map straight to fmin/fmax.
(define_insn "smin3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(smin:ANYF (match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fmin.\t%0,%1,%2"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
(define_insn "smax3"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(smax:ANYF (match_operand:ANYF 1 "register_operand" " f")
(match_operand:ANYF 2 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fmax.\t%0,%1,%2"
[(set_attr "type" "fmove")
(set_attr "mode" "")])
;;
;; ....................
;;
;; LOGICAL
;;
;; ....................
;;
;; For RV64, we don't expose the SImode operations to the rtl expanders,
;; but SImode versions exist for combine.
;; AND expander: a constant mask equal to GET_MODE_MASK of HImode/SImode
;; is turned into a zero-extension instead of an and, so the cheaper
;; extension patterns (and Zbb/Zba forms elsewhere) can be used.
(define_expand "and3"
[(set (match_operand:X 0 "register_operand")
(and:X (match_operand:X 1 "register_operand")
(match_operand:X 2 "arith_or_mode_mask_or_zbs_operand")))]
""
{
/* If the second operand is a mode mask, emit an extension
insn instead. */
if (CONST_INT_P (operands[2]))
{
enum machine_mode tmode = VOIDmode;
if (UINTVAL (operands[2]) == GET_MODE_MASK (HImode))
tmode = HImode;
else if (UINTVAL (operands[2]) == GET_MODE_MASK (SImode))
tmode = SImode;
if (tmode != VOIDmode)
{
rtx tmp = gen_lowpart (tmode, operands[1]);
emit_insn (gen_extend_insn (operands[0], tmp, mode, tmode, 1));
DONE;
}
}
})
;; and/andi insn; %i2 emits the immediate form for constraint 'I'.
(define_insn "*and3"
[(set (match_operand:X 0 "register_operand" "=r,r")
(and:X (match_operand:X 1 "register_operand" "%r,r")
(match_operand:X 2 "arith_operand" " r,I")))]
""
"and%i2\t%0,%1,%2"
[(set_attr "type" "logical")
(set_attr "mode" "")])
;; When we construct constants we may want to twiddle a single bit
;; by generating an IOR. But the constant likely doesn't fit
;; arith_operand. So the generic code will reload the constant into
;; a register. Post-reload we won't have the chance to squash things
;; back into a Zbs insn.
;;
;; So indirect through a define_expand. That allows us to have a
;; predicate that conditionally accepts single bit constants without
;; putting the details of Zbs instructions in here.
(define_expand "3"
[(set (match_operand:X 0 "register_operand")
(any_or:X (match_operand:X 1 "register_operand" "")
(match_operand:X 2 "arith_or_zbs_operand" "")))]
"")
;; or/ori and xor/xori insns via the any_or iterator.
(define_insn "*3"
[(set (match_operand:X 0 "register_operand" "=r,r")
(any_or:X (match_operand:X 1 "register_operand" "%r,r")
(match_operand:X 2 "arith_operand" " r,I")))]
""
"%i2\t%0,%1,%2"
[(set_attr "type" "logical")
(set_attr "mode" "")])
;; SImode bitwise ops on RV64, kept only for combine (see note above).
(define_insn "*si3_internal"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
(match_operand:SI 2 "arith_operand" " r,I")))]
"TARGET_64BIT"
"%i2\t%0,%1,%2"
[(set_attr "type" "logical")
(set_attr "mode" "SI")])
;; One's complement ("not" is the xori -1 pseudo-op).
(define_insn "one_cmpl2"
[(set (match_operand:X 0 "register_operand" "=r")
(not:X (match_operand:X 1 "register_operand" " r")))]
""
"not\t%0,%1"
[(set_attr "type" "logical")
(set_attr "mode" "")])
;; SImode complement on RV64, for combine.
(define_insn "*one_cmplsi2_internal"
[(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_operand:SI 1 "register_operand" " r")))]
"TARGET_64BIT"
"not\t%0,%1"
[(set_attr "type" "logical")
(set_attr "mode" "SI")])
;;
;; ....................
;;
;; TRUNCATION
;;
;; ....................
;; Narrowing FP conversions (fcvt to a smaller format).
(define_insn "truncdfsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:DF 1 "register_operand" " f")))]
"TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
"fcvt.s.d\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "SF")])
(define_insn "truncsfhf2"
[(set (match_operand:HF 0 "register_operand" "=f")
(float_truncate:HF
(match_operand:SF 1 "register_operand" " f")))]
"TARGET_ZFHMIN || TARGET_ZHINXMIN"
"fcvt.h.s\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "HF")])
(define_insn "truncdfhf2"
[(set (match_operand:HF 0 "register_operand" "=f")
(float_truncate:HF
(match_operand:DF 1 "register_operand" " f")))]
"(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
(TARGET_ZHINXMIN && TARGET_ZDINX)"
"fcvt.h.d\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "HF")])
;; SF->BF16 truncation (Zfbfmin).
(define_insn "truncsfbf2"
[(set (match_operand:BF 0 "register_operand" "=f")
(float_truncate:BF
(match_operand:SF 1 "register_operand" " f")))]
"TARGET_ZFBFMIN"
"fcvt.bf16.s\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "BF")])
;; The conversion of HF/DF/TF to BF needs to be done with SF if there is a
;; chance to generate at least one instruction, otherwise just using
;; libfunc __trunc[h|d|t]fbf2.
(define_expand "truncbf2"
[(set (match_operand:BF 0 "register_operand" "=f")
(float_truncate:BF
(match_operand:FBF 1 "register_operand" " f")))]
"TARGET_ZFBFMIN"
{
/* Route the narrowing through SFmode: widen/narrow to SF first, then
let convert_move emit the SF->BF truncation.  */
convert_move (operands[0],
convert_modes (SFmode, mode, operands[1], 0), 0);
DONE;
}
[(set_attr "type" "fcvt")
(set_attr "mode" "BF")])
;;
;; ....................
;;
;; ZERO EXTENSION
;;
;; ....................
;; Extension insns.
(define_expand "zero_extendsidi2"
[(set (match_operand:DI 0 "register_operand")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
"TARGET_64BIT")
;; Base-ISA SI->DI zero extension: lwu from memory, or a shift-left/
;; shift-right-logical pair for registers (split after reload).  Guarded
;; out when Zba/XTheadBb provide a single-insn form elsewhere.
(define_insn_and_split "*zero_extendsidi2_internal"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(zero_extend:DI
(match_operand:SI 1 "nonimmediate_operand" " r,m")))]
"TARGET_64BIT && !TARGET_ZBA && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX
&& !(REG_P (operands[1]) && VL_REG_P (REGNO (operands[1])))"
"@
#
lwu\t%0,%1"
"&& reload_completed
&& REG_P (operands[1])
&& !paradoxical_subreg_p (operands[0])"
[(set (match_dup 0)
(ashift:DI (match_dup 1) (const_int 32)))
(set (match_dup 0)
(lshiftrt:DI (match_dup 0) (const_int 32)))]
{ operands[1] = gen_lowpart (DImode, operands[1]); }
[(set_attr "move_type" "shift_shift,load")
(set_attr "type" "load")
(set_attr "mode" "DI")])
(define_expand "zero_extendhi2"
[(set (match_operand:GPR 0 "register_operand")
(zero_extend:GPR
(match_operand:HI 1 "nonimmediate_operand")))]
"")
;; HI zero extension: lhu from memory, shift pair for registers
;; (Zbb/XTheadBb have single-insn alternatives elsewhere).
(define_insn_and_split "*zero_extendhi2"
[(set (match_operand:GPR 0 "register_operand" "=r,r")
(zero_extend:GPR
(match_operand:HI 1 "nonimmediate_operand" " r,m")))]
"!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
"@
#
lhu\t%0,%1"
"&& reload_completed
&& REG_P (operands[1])
&& !paradoxical_subreg_p (operands[0])"
[(set (match_dup 0)
(ashift:GPR (match_dup 1) (match_dup 2)))
(set (match_dup 0)
(lshiftrt:GPR (match_dup 0) (match_dup 2)))]
{
operands[1] = gen_lowpart (mode, operands[1]);
operands[2] = GEN_INT(GET_MODE_BITSIZE(mode) - 16);
}
[(set_attr "move_type" "shift_shift,load")
(set_attr "type" "load")
(set_attr "mode" "")])
(define_expand "zero_extendqi2"
[(set (match_operand:SUPERQI 0 "register_operand")
(zero_extend:SUPERQI
(match_operand:QI 1 "nonimmediate_operand")))]
"")
;; QI zero extension fits in one insn either way: andi 0xff or lbu.
(define_insn "*zero_extendqi2_internal"
[(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
(zero_extend:SUPERQI
(match_operand:QI 1 "nonimmediate_operand" " r,m")))]
"!TARGET_XTHEADMEMIDX"
"@
andi\t%0,%1,0xff
lbu\t%0,%1"
[(set_attr "move_type" "andi,load")
(set_attr "type" "arith,load")
(set_attr "mode" "")])
;;
;; ....................
;;
;; SIGN EXTENSION
;;
;; ....................
(define_expand "extendsidi2"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(sign_extend:DI
(match_operand:SI 1 "nonimmediate_operand" " r,m")))]
"TARGET_64BIT")
;; SI->DI sign extension is native on RV64: sext.w (addiw 0) or lw.
(define_insn "*extendsidi2_internal"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(sign_extend:DI
(match_operand:SI 1 "nonimmediate_operand" " r,m")))]
"TARGET_64BIT && !TARGET_XTHEADMEMIDX"
"@
sext.w\t%0,%1
lw\t%0,%1"
[(set_attr "move_type" "move,load")
(set_attr "type" "move,load")
(set_attr "mode" "DI")])
(define_expand "extend2"
[(set (match_operand:SUPERQI 0 "register_operand")
(sign_extend:SUPERQI (match_operand:SHORT 1 "nonimmediate_operand")))]
"")
;; QI/HI sign extension without Zbb: signed load from memory, or an
;; ashift/ashiftrt pair through SImode for registers (split post-reload).
(define_insn_and_split "*extend2"
[(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
(sign_extend:SUPERQI
(match_operand:SHORT 1 "nonimmediate_operand" " r,m")))]
"!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
"@
#
l\t%0,%1"
"&& reload_completed
&& REG_P (operands[1])
&& !paradoxical_subreg_p (operands[0])"
[(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
(set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
{
operands[0] = gen_lowpart (SImode, operands[0]);
operands[1] = gen_lowpart (SImode, operands[1]);
operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
- GET_MODE_BITSIZE (mode));
}
[(set_attr "move_type" "shift_shift,load")
(set_attr "type" "load")
(set_attr "mode" "SI")])
;; Widening FP conversions (fcvt to a larger format).
(define_insn "extendhfsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_extend:SF
(match_operand:HF 1 "register_operand" " f")))]
"TARGET_ZFHMIN || TARGET_ZHINXMIN"
"fcvt.s.h\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "SF")])
(define_insn "extendbfsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_extend:SF
(match_operand:BF 1 "register_operand" " f")))]
"TARGET_ZFBFMIN"
"fcvt.s.bf16\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "SF")])
(define_insn "extendsfdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(float_extend:DF
(match_operand:SF 1 "register_operand" " f")))]
"TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
"fcvt.d.s\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "DF")])
(define_insn "extendhfdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(float_extend:DF
(match_operand:HF 1 "register_operand" " f")))]
"(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
(TARGET_ZHINXMIN && TARGET_ZDINX)"
"fcvt.d.h\t%0,%1"
[(set_attr "type" "fcvt")
(set_attr "mode" "DF")])
;; 16-bit floating point moves
;; Move expander for HFmode/BFmode; riscv_legitimize_move handles the
;; cases a plain set cannot express, returning true when it emitted code.
(define_expand "mov"
[(set (match_operand:HFBF 0 "")
(match_operand:HFBF 1 ""))]
""
{
if (riscv_legitimize_move (mode, operands[0], operands[1]))
DONE;
})
;; HF/BF moves when the scalar-FP min extensions are available; the
;; alternatives cover FPR<->FPR/GPR/memory plus constant materialization.
(define_insn "*mov_hardfloat"
[(set (match_operand:HFBF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
(match_operand:HFBF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
"((TARGET_ZFHMIN && mode == HFmode)
|| (TARGET_ZFBFMIN && mode == BFmode))
&& (register_operand (operands[0], mode)
|| reg_or_0_operand (operands[1], mode))"
{ return riscv_output_move (operands[0], operands[1]); }
[(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
(set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
(set_attr "mode" "")])
;; Fallback HF/BF moves through GPRs when no min extension applies.
(define_insn "*mov_softfloat"
[(set (match_operand:HFBF 0 "nonimmediate_operand" "=f, r,r,m,*f,*r")
(match_operand:HFBF 1 "move_operand" " f,Gr,m,r,*r,*f"))]
"((!TARGET_ZFHMIN && mode == HFmode) || (mode == BFmode))
&& (register_operand (operands[0], mode)
|| reg_or_0_operand (operands[1], mode))"
{ return riscv_output_move (operands[0], operands[1]); }
[(set_attr "move_type" "fmove,move,load,store,mtc,mfc")
(set_attr "type" "fmove,move,load,store,mtc,mfc")
(set_attr "mode" "SF")])
;; Box a 16-bit value held in a GPR into an FPR via fmv.w.x.
(define_insn "*mov_softfloat_boxing"
[(set (match_operand:HFBF 0 "register_operand" "=f")
(unspec:HFBF [(match_operand:X 1 "register_operand" " r")]
UNSPEC_FMV_FP16_X))]
"!TARGET_ZFHMIN"
"fmv.w.x\t%0,%1"
[(set_attr "type" "fmove")
(set_attr "mode" "SF")])
;;
;; ....................
;;
;; CONVERSIONS
;;
;; ....................
(define_expand "_truncsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(fix_ops:SI
(match_operand:ANYF 1 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
{
if (TARGET_64BIT)
{
rtx t = gen_reg_rtx (DImode);
emit_insn (gen__truncsi2_sext (t, operands[1]));
t = gen_lowpart (SImode, t);
SUBREG_PROMOTED_VAR_P (t) = 1;
SUBREG_PROMOTED_SET (t, SRP_SIGNED);
emit_move_insn (operands[0], t);
DONE;
}
})
(define_insn "*_truncsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(fix_ops:SI
(match_operand:ANYF 1 "register_operand" " f")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fcvt.w. %0,%1,rtz"
[(set_attr "type" "fcvt_f2i")
(set_attr "mode" "")])
(define_insn "_truncsi2_sext"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI (fix_ops:SI
(match_operand:ANYF 1 "register_operand" " f"))))]
"TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
"fcvt.w. %0,%1,rtz"
[(set_attr "type" "fcvt_f2i")
(set_attr "mode" "")])
(define_insn "_truncdi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(fix_ops:DI
(match_operand:ANYF 1 "register_operand" " f")))]
"TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
"fcvt.l. %0,%1,rtz"
[(set_attr "type" "fcvt_f2i")
(set_attr "mode" "")])
(define_insn "float2"
[(set (match_operand:ANYF 0 "register_operand" "= f")
(float:ANYF
(match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fcvt..\t%0,%z1"
[(set_attr "type" "fcvt_i2f")
(set_attr "mode" "")])
(define_insn "floatuns2"
[(set (match_operand:ANYF 0 "register_operand" "= f")
(unsigned_float:ANYF
(match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
"fcvt..u\t%0,%z1"
[(set_attr "type" "fcvt_i2f")
(set_attr "mode" "")])
(define_expand "lrintsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI
[(match_operand:ANYF 1 "register_operand" " f")]
UNSPEC_LRINT))]
"TARGET_HARD_FLOAT || TARGET_ZFINX"
{
if (TARGET_64BIT)
{
rtx t = gen_reg_rtx (DImode);
emit_insn (gen_lrintsi2_sext (t, operands[1]));
t = gen_lowpart (SImode, t);
SUBREG_PROMOTED_VAR_P (t) = 1;
SUBREG_PROMOTED_SET (t, SRP_SIGNED);
emit_move_insn (operands[0], t);
DONE;
}
})
;; lrint uses the dynamic rounding mode (dyn) per C semantics.
(define_insn "*lrint<ANYF:mode>si2"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(unspec:SI
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    UNSPEC_LRINT))]
  "TARGET_HARD_FLOAT || TARGET_ZFINX"
  "fcvt.w.<fmt>\t%0,%1,dyn"
  [(set_attr "type" "fcvt_f2i")
   (set_attr "mode" "<UNITMODE>")])
;; lrint to SImode with explicit sign-extension to DImode (RV64 only).
(define_insn "lrint<ANYF:mode>si2_sext"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(sign_extend:DI (unspec:SI
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    UNSPEC_LRINT)))]
  "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
  "fcvt.w.<fmt>\t%0,%1,dyn"
  [(set_attr "type" "fcvt_f2i")
   (set_attr "mode" "<UNITMODE>")])
;; lrint to DImode; fcvt.l.* only exists on RV64.
(define_insn "lrint<ANYF:mode>di2"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec:DI
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    UNSPEC_LRINT))]
  "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
  "fcvt.l.<fmt>\t%0,%1,dyn"
  [(set_attr "type" "fcvt_f2i")
   (set_attr "mode" "<UNITMODE>")])
;; l<round> (lround/lfloor/lceil/...) to SImode.  As with lrint, RV64
;; funnels through the _sext pattern and marks the lowpart promoted so
;; redundant sign extensions can be removed.
(define_expand "l<round_pattern><ANYF:mode>si2"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(unspec:SI
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    ROUND))]
  "TARGET_HARD_FLOAT || TARGET_ZFINX"
{
  if (TARGET_64BIT)
    {
      rtx t = gen_reg_rtx (DImode);
      emit_insn (gen_l<round_pattern><ANYF:mode>si2_sext (t, operands[1]));
      t = gen_lowpart (SImode, t);
      SUBREG_PROMOTED_VAR_P (t) = 1;
      SUBREG_PROMOTED_SET (t, SRP_SIGNED);
      emit_move_insn (operands[0], t);
      DONE;
    }
})
;; <round_rm> supplies the static rounding mode matching each ROUND
;; unspec (e.g. rmm for lround, rdn for lfloor, rup for lceil).
(define_insn "*l<round_pattern><ANYF:mode>si2"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(unspec:SI
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    ROUND))]
  "TARGET_HARD_FLOAT || TARGET_ZFINX"
  "fcvt.w.<fmt>\t%0,%1,<round_rm>"
  [(set_attr "type" "fcvt_f2i")
   (set_attr "mode" "<UNITMODE>")])
;; l<round> to SImode with explicit sign-extension to DImode (RV64 only).
(define_insn "l<round_pattern><ANYF:mode>si2_sext"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(sign_extend:DI (unspec:SI
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    ROUND)))]
  "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
  "fcvt.w.<fmt>\t%0,%1,<round_rm>"
  [(set_attr "type" "fcvt_f2i")
   (set_attr "mode" "<UNITMODE>")])
;; l<round> to DImode; fcvt.l.* only exists on RV64.
(define_insn "l<round_pattern><ANYF:mode>di2"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec:DI
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    ROUND))]
  "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
  "fcvt.l.<fmt>\t%0,%1,<round_rm>"
  [(set_attr "type" "fcvt_f2i")
   (set_attr "mode" "<UNITMODE>")])
;; There are a couple of non-obvious restrictions to be aware of.
;;
;; The fallback sequence below performs an FP->INT->FP round trip.  But
;; we don't have a .l (64-bit) variant of those conversion instructions
;; for rv32.  To preserve proper semantics we must reject DFmode inputs
;; for rv32 unless Zfa is enabled.
;;
;; The ANYF iterator allows HFmode.  We don't have all the
;; necessary patterns defined for HFmode.  So restrict HFmode
;; to TARGET_ZFA.
;; FP rounding (round/floor/ceil/btrunc/roundeven/nearbyint) kept in FP.
;; With Zfa this is a single fround instruction.  Otherwise emit a
;; branchy sequence: values whose magnitude is at least 2^mantissa_bits
;; (the "rounding coefficient") are already integral and pass through;
;; smaller values take the FP->INT->FP round trip, with copysign
;; restoring the sign of +/-0.0 and negative inputs that round to zero.
(define_expand "<round_pattern><ANYF:mode>2"
  [(set (match_operand:ANYF 0 "register_operand" "=f")
	(unspec:ANYF
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    ROUND))]
  "(TARGET_HARD_FLOAT
    && (TARGET_ZFA || flag_fp_int_builtin_inexact || !flag_trapping_math)
    && (TARGET_ZFA || TARGET_64BIT || <ANYF:MODE>mode != DFmode)
    && (TARGET_ZFA || <ANYF:MODE>mode != HFmode))"
{
  if (TARGET_ZFA)
    emit_insn (gen_<round_pattern><ANYF:mode>2_zfa (operands[0],
						    operands[1]));
  else
    {
      rtx reg;
      rtx label = gen_label_rtx ();
      rtx end_label = gen_label_rtx ();
      rtx abs_reg = gen_reg_rtx (<ANYF:MODE>mode);
      rtx coeff_reg = gen_reg_rtx (<ANYF:MODE>mode);
      rtx tmp_reg = gen_reg_rtx (<ANYF:MODE>mode);

      riscv_emit_move (tmp_reg, operands[1]);
      riscv_emit_move (coeff_reg,
		       riscv_vector::get_fp_rounding_coefficient (<ANYF:MODE>mode));
      emit_insn (gen_abs<ANYF:mode>2 (abs_reg, operands[1]));

      /* |x| < 2^mantissa_bits: take the conversion path at LABEL;
	 otherwise x is already integral and is returned unchanged.  */
      riscv_expand_conditional_branch (label, LT, abs_reg, coeff_reg);

      emit_jump_insn (gen_jump (end_label));
      emit_barrier ();

      emit_label (label);
      switch (<ANYF:MODE>mode)
	{
	case SFmode:
	  reg = gen_reg_rtx (SImode);
	  emit_insn (gen_l<round_pattern>sfsi2 (reg, operands[1]));
	  emit_insn (gen_floatsisf2 (abs_reg, reg));
	  break;
	case DFmode:
	  reg = gen_reg_rtx (DImode);
	  emit_insn (gen_l<round_pattern>dfdi2 (reg, operands[1]));
	  emit_insn (gen_floatdidf2 (abs_reg, reg));
	  break;
	default:
	  gcc_unreachable ();
	}

      /* Restore the original sign (covers -0.0 and negatives that
	 rounded to zero).  */
      emit_insn (gen_copysign<ANYF:mode>3 (tmp_reg, abs_reg, operands[1]));

      emit_label (end_label);
      riscv_emit_move (operands[0], tmp_reg);
    }

  DONE;
})
;; Zfa single-instruction rounding; <round_rm> is the static rounding
;; mode matching each ROUND unspec.
(define_insn "<round_pattern><ANYF:mode>2_zfa"
  [(set (match_operand:ANYF 0 "register_operand" "=f")
	(unspec:ANYF
	    [(match_operand:ANYF 1 "register_operand" " f")]
	    ROUND))]
  "TARGET_HARD_FLOAT && TARGET_ZFA"
  "fround.<fmt>\t%0,%1,<round_rm>"
  [(set_attr "type" "fcvt")
   (set_attr "mode" "<UNITMODE>")])
(define_insn "rint2"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(unspec:ANYF
[(match_operand:ANYF 1 "register_operand" " f")]
UNSPEC_RINT))]
"TARGET_HARD_FLOAT && TARGET_ZFA"
"froundnx.