;; Machine description for RISC-V 'V' Extension for GNU compiler.
;; Copyright (C) 2022-2023 Free Software Foundation, Inc.
;; Contributed by Juzhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd.
;; This file is part of GCC.
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
;; This file describes the RISC-V 'V' Extension, Version 1.0.
;;
;; This file includes:
;;
;; - Intrinsics (https://github.com/riscv/rvv-intrinsic-doc)
;; - Auto-vectorization (autovec.md)
;; - Optimization (autovec-opt.md)
(include "vector-iterators.md")
(define_constants [
(INVALID_ATTRIBUTE 255)
(X0_REGNUM 0)
])
;; True if the type is an RVV instruction that includes the VTYPE
;; global status register in its use op list.
;; VTYPE has 4 fields: SEW, LMUL, TA, MA.
;; Any instruction that needs one of the VTYPE fields is set to true
;; in this attribute.
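;; For example, a pattern of type "vialu" (e.g. one emitting vadd.vv)
;; depends on the SEW and LMUL fields of VTYPE, so it is modelled as
;; using VTYPE; a pure scalar pattern is not.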
(define_attr "has_vtype_op" "false,true"
(cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
vldux,vldox,vstux,vstox,vldff,\
vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,\
vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
vsalu,vaalu,vsmul,vsshift,vnclip,\
vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,\
vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
vssegtux,vssegtox,vlsegdff")
(const_string "true")]
(const_string "false")))
;; True if the type is an RVV instruction that includes the VL
;; global status register in its use op list.
;; Any instruction that needs the vector length to be specified is set
;; to true in this attribute.
(define_attr "has_vl_op" "false,true"
(cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
vldux,vldox,vstux,vstox,vldff,\
vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,\
vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
vsalu,vaalu,vsmul,vsshift,vnclip,\
vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,\
vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovxv,vfmovfv,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
vssegtux,vssegtox,vlsegdff")
(const_string "true")]
(const_string "false")))
;; The default SEW of an RVV instruction.  This attribute doesn't mean the
;; instruction necessarily requires an SEW check; for example, vlm.v only
;; requires a ratio check.  However, we need a default value of SEW for the
;; vsetvl instruction since there is no field for the ratio in the vsetvl
;; instruction encoding.
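;; For example, the table below gives RVVM1SI (32-bit elements) a default
;; SEW of 32, while all the mask modes (e.g. RVVMF8BI) get a default SEW
;; of 8.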
(define_attr "sew" ""
(cond [(eq_attr "mode" "RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,\
RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,\
RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,\
RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,\
RVVM1x6QI,RVVMF2x6QI,RVVMF4x6QI,RVVMF8x6QI,\
RVVM1x5QI,RVVMF2x5QI,RVVMF4x5QI,RVVMF8x5QI,\
RVVM2x4QI,RVVM1x4QI,RVVMF2x4QI,RVVMF4x4QI,RVVMF8x4QI,\
RVVM2x3QI,RVVM1x3QI,RVVMF2x3QI,RVVMF4x3QI,RVVMF8x3QI,\
RVVM4x2QI,RVVM2x2QI,RVVM1x2QI,RVVMF2x2QI,RVVMF4x2QI,RVVMF8x2QI,\
V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,\
V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI")
(const_int 8)
(eq_attr "mode" "RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,\
RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,\
RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,\
RVVM1x6HI,RVVMF2x6HI,RVVMF4x6HI,\
RVVM1x5HI,RVVMF2x5HI,RVVMF4x5HI,\
RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,\
RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,\
RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,\
RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,\
RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,\
RVVM1x7HF,RVVMF2x7HF,RVVMF4x7HF,\
RVVM1x6HF,RVVMF2x6HF,RVVMF4x6HF,\
RVVM1x5HF,RVVMF2x5HF,RVVMF4x5HF,\
RVVM2x4HF,RVVM1x4HF,RVVMF2x4HF,RVVMF4x4HF,\
RVVM2x3HF,RVVM1x3HF,RVVMF2x3HF,RVVMF4x3HF,\
RVVM4x2HF,RVVM2x2HF,RVVM1x2HF,RVVMF2x2HF,RVVMF4x2HF,\
V1HI,V2HI,V4HI,V8HI,V16HI,V32HI,V64HI,V128HI,V256HI,V512HI,V1024HI,V2048HI,\
V1HF,V2HF,V4HF,V8HF,V16HF,V32HF,V64HF,V128HF,V256HF,V512HF,V1024HF,V2048HF")
(const_int 16)
(eq_attr "mode" "RVVM8SI,RVVM4SI,RVVM2SI,RVVM1SI,RVVMF2SI,\
RVVM8SF,RVVM4SF,RVVM2SF,RVVM1SF,RVVMF2SF,\
RVVM1x8SI,RVVMF2x8SI,\
RVVM1x7SI,RVVMF2x7SI,\
RVVM1x6SI,RVVMF2x6SI,\
RVVM1x5SI,RVVMF2x5SI,\
RVVMF2x4SI,RVVMF2x3SI,\
RVVM2x4SI,RVVM1x4SI,\
RVVM2x3SI,RVVM1x3SI,\
RVVM4x2SI,RVVM2x2SI,RVVM1x2SI,RVVMF2x2SI,\
RVVM1x8SF,RVVMF2x8SF,\
RVVM1x7SF,RVVMF2x7SF,\
RVVM1x6SF,RVVMF2x6SF,\
RVVM1x5SF,RVVMF2x5SF,\
RVVM2x4SF,RVVM1x4SF,RVVMF2x4SF,\
RVVM2x3SF,RVVM1x3SF,RVVMF2x3SF,\
RVVM4x2SF,RVVM2x2SF,RVVM1x2SF,RVVMF2x2SF,\
V1SI,V2SI,V4SI,V8SI,V16SI,V32SI,V64SI,V128SI,V256SI,V512SI,V1024SI,\
V1SF,V2SF,V4SF,V8SF,V16SF,V32SF,V64SF,V128SF,V256SF,V512SF,V1024SF")
(const_int 32)
(eq_attr "mode" "RVVM8DI,RVVM4DI,RVVM2DI,RVVM1DI,\
RVVM8DF,RVVM4DF,RVVM2DF,RVVM1DF,\
RVVM1x8DI,RVVM1x7DI,RVVM1x6DI,RVVM1x5DI,\
RVVM2x4DI,RVVM1x4DI,\
RVVM2x3DI,RVVM1x3DI,\
RVVM4x2DI,RVVM2x2DI,RVVM1x2DI,\
RVVM1x8DF,RVVM1x7DF,RVVM1x6DF,RVVM1x5DF,\
RVVM2x4DF,RVVM1x4DF,\
RVVM2x3DF,RVVM1x3DF,\
RVVM4x2DF,RVVM2x2DF,RVVM1x2DF,\
V1DI,V2DI,V4DI,V8DI,V16DI,V32DI,V64DI,V128DI,V256DI,V512DI,\
V1DF,V2DF,V4DF,V8DF,V16DF,V32DF,V64DF,V128DF,V256DF,V512DF")
(const_int 64)]
(const_int INVALID_ATTRIBUTE)))
;; Ditto for LMUL.
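;; The vector mode name encodes LMUL directly: RVVM8QI maps to LMUL_8 and
;; the fractional RVVMF2QI maps to LMUL_F2 (LMUL = 1/2).  Mask modes are
;; keyed by their SEW/LMUL ratio instead, which is why e.g. RVVM1BI shares
;; an entry with RVVM8QI below.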
(define_attr "vlmul" ""
(cond [(eq_attr "mode" "RVVM8QI,RVVM1BI") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4QI,RVVMF2BI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2QI,RVVMF4BI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1QI,RVVMF8BI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2QI,RVVMF16BI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4QI,RVVMF32BI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8QI,RVVMF64BI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM8HI") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4HI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2HI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM8HF") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4HF") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2HF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM8SI") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4SI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2SI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM8SF") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4SF") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2SF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM8DI") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4DI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2DI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM8DF") (symbol_ref "riscv_vector::LMUL_8")
(eq_attr "mode" "RVVM4DF") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2DF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1DF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x8QI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x8QI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x8QI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8x8QI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM1x7QI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x7QI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x7QI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8x7QI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM1x6QI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x6QI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x6QI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8x6QI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM1x5QI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x5QI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x5QI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8x5QI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM2x4QI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x4QI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x4QI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x4QI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8x4QI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM2x3QI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x3QI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x3QI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x3QI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8x3QI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM4x2QI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2x2QI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x2QI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x2QI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x2QI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVMF8x2QI") (symbol_ref "riscv_vector::LMUL_F8")
(eq_attr "mode" "RVVM1x8HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x8HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x8HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x7HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x7HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x7HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x6HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x6HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x6HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x5HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x5HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x5HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM2x4HI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x4HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x4HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x4HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM2x3HI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x3HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x3HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x3HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM4x2HI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2x2HI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x2HI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x2HI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x2HI") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x8HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x8HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x8HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x7HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x7HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x7HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x6HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x6HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x6HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x5HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x5HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x5HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM2x4HF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x4HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x4HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x4HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM2x3HF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x3HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x3HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x3HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM4x2HF") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2x2HF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x2HF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x2HF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVMF4x2HF") (symbol_ref "riscv_vector::LMUL_F4")
(eq_attr "mode" "RVVM1x8SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x8SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x7SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x7SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x6SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x6SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x5SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x5SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM2x4SI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x4SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x4SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM2x3SI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x3SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x3SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM4x2SI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2x2SI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x2SI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x2SI") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x8SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x8SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x7SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x7SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x6SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x6SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x5SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x5SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM2x4SF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x4SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x4SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM2x3SF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x3SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x3SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM4x2SF") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2x2SF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x2SF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVMF2x2SF") (symbol_ref "riscv_vector::LMUL_F2")
(eq_attr "mode" "RVVM1x8DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x7DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x6DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x5DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM2x4DI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x4DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM2x3DI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x3DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM4x2DI") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2x2DI") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x2DI") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x8DF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x7DF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x6DF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM1x5DF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM2x4DF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x4DF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM2x3DF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x3DF") (symbol_ref "riscv_vector::LMUL_1")
(eq_attr "mode" "RVVM4x2DF") (symbol_ref "riscv_vector::LMUL_4")
(eq_attr "mode" "RVVM2x2DF") (symbol_ref "riscv_vector::LMUL_2")
(eq_attr "mode" "RVVM1x2DF") (symbol_ref "riscv_vector::LMUL_1")
;; VLS modes.
(eq_attr "mode" "V1QI,V1BI") (symbol_ref "riscv_vector::get_vlmul(E_V1QImode)")
(eq_attr "mode" "V2QI,V2BI") (symbol_ref "riscv_vector::get_vlmul(E_V2QImode)")
(eq_attr "mode" "V4QI,V4BI") (symbol_ref "riscv_vector::get_vlmul(E_V4QImode)")
(eq_attr "mode" "V8QI,V8BI") (symbol_ref "riscv_vector::get_vlmul(E_V8QImode)")
(eq_attr "mode" "V16QI,V16BI") (symbol_ref "riscv_vector::get_vlmul(E_V16QImode)")
(eq_attr "mode" "V32QI,V32BI") (symbol_ref "riscv_vector::get_vlmul(E_V32QImode)")
(eq_attr "mode" "V64QI,V64BI") (symbol_ref "riscv_vector::get_vlmul(E_V64QImode)")
(eq_attr "mode" "V128QI,V128BI") (symbol_ref "riscv_vector::get_vlmul(E_V128QImode)")
(eq_attr "mode" "V256QI,V256BI") (symbol_ref "riscv_vector::get_vlmul(E_V256QImode)")
(eq_attr "mode" "V512QI,V512BI") (symbol_ref "riscv_vector::get_vlmul(E_V512QImode)")
(eq_attr "mode" "V1024QI,V1024BI") (symbol_ref "riscv_vector::get_vlmul(E_V1024QImode)")
(eq_attr "mode" "V2048QI,V2048BI") (symbol_ref "riscv_vector::get_vlmul(E_V2048QImode)")
(eq_attr "mode" "V4096QI,V4096BI") (symbol_ref "riscv_vector::get_vlmul(E_V4096QImode)")
(eq_attr "mode" "V1HI") (symbol_ref "riscv_vector::get_vlmul(E_V1HImode)")
(eq_attr "mode" "V2HI") (symbol_ref "riscv_vector::get_vlmul(E_V2HImode)")
(eq_attr "mode" "V4HI") (symbol_ref "riscv_vector::get_vlmul(E_V4HImode)")
(eq_attr "mode" "V8HI") (symbol_ref "riscv_vector::get_vlmul(E_V8HImode)")
(eq_attr "mode" "V16HI") (symbol_ref "riscv_vector::get_vlmul(E_V16HImode)")
(eq_attr "mode" "V32HI") (symbol_ref "riscv_vector::get_vlmul(E_V32HImode)")
(eq_attr "mode" "V64HI") (symbol_ref "riscv_vector::get_vlmul(E_V64HImode)")
(eq_attr "mode" "V128HI") (symbol_ref "riscv_vector::get_vlmul(E_V128HImode)")
(eq_attr "mode" "V256HI") (symbol_ref "riscv_vector::get_vlmul(E_V256HImode)")
(eq_attr "mode" "V512HI") (symbol_ref "riscv_vector::get_vlmul(E_V512HImode)")
(eq_attr "mode" "V1024HI") (symbol_ref "riscv_vector::get_vlmul(E_V1024HImode)")
(eq_attr "mode" "V2048HI") (symbol_ref "riscv_vector::get_vlmul(E_V2048HImode)")
(eq_attr "mode" "V1SI") (symbol_ref "riscv_vector::get_vlmul(E_V1SImode)")
(eq_attr "mode" "V2SI") (symbol_ref "riscv_vector::get_vlmul(E_V2SImode)")
(eq_attr "mode" "V4SI") (symbol_ref "riscv_vector::get_vlmul(E_V4SImode)")
(eq_attr "mode" "V8SI") (symbol_ref "riscv_vector::get_vlmul(E_V8SImode)")
(eq_attr "mode" "V16SI") (symbol_ref "riscv_vector::get_vlmul(E_V16SImode)")
(eq_attr "mode" "V32SI") (symbol_ref "riscv_vector::get_vlmul(E_V32SImode)")
(eq_attr "mode" "V64SI") (symbol_ref "riscv_vector::get_vlmul(E_V64SImode)")
(eq_attr "mode" "V128SI") (symbol_ref "riscv_vector::get_vlmul(E_V128SImode)")
(eq_attr "mode" "V256SI") (symbol_ref "riscv_vector::get_vlmul(E_V256SImode)")
(eq_attr "mode" "V512SI") (symbol_ref "riscv_vector::get_vlmul(E_V512SImode)")
(eq_attr "mode" "V1024SI") (symbol_ref "riscv_vector::get_vlmul(E_V1024SImode)")
(eq_attr "mode" "V1DI") (symbol_ref "riscv_vector::get_vlmul(E_V1DImode)")
(eq_attr "mode" "V2DI") (symbol_ref "riscv_vector::get_vlmul(E_V2DImode)")
(eq_attr "mode" "V4DI") (symbol_ref "riscv_vector::get_vlmul(E_V4DImode)")
(eq_attr "mode" "V8DI") (symbol_ref "riscv_vector::get_vlmul(E_V8DImode)")
(eq_attr "mode" "V16DI") (symbol_ref "riscv_vector::get_vlmul(E_V16DImode)")
(eq_attr "mode" "V32DI") (symbol_ref "riscv_vector::get_vlmul(E_V32DImode)")
(eq_attr "mode" "V64DI") (symbol_ref "riscv_vector::get_vlmul(E_V64DImode)")
(eq_attr "mode" "V128DI") (symbol_ref "riscv_vector::get_vlmul(E_V128DImode)")
(eq_attr "mode" "V256DI") (symbol_ref "riscv_vector::get_vlmul(E_V256DImode)")
(eq_attr "mode" "V512DI") (symbol_ref "riscv_vector::get_vlmul(E_V512DImode)")
(eq_attr "mode" "V1HF") (symbol_ref "riscv_vector::get_vlmul(E_V1HFmode)")
(eq_attr "mode" "V2HF") (symbol_ref "riscv_vector::get_vlmul(E_V2HFmode)")
(eq_attr "mode" "V4HF") (symbol_ref "riscv_vector::get_vlmul(E_V4HFmode)")
(eq_attr "mode" "V8HF") (symbol_ref "riscv_vector::get_vlmul(E_V8HFmode)")
(eq_attr "mode" "V16HF") (symbol_ref "riscv_vector::get_vlmul(E_V16HFmode)")
(eq_attr "mode" "V32HF") (symbol_ref "riscv_vector::get_vlmul(E_V32HFmode)")
(eq_attr "mode" "V64HF") (symbol_ref "riscv_vector::get_vlmul(E_V64HFmode)")
(eq_attr "mode" "V128HF") (symbol_ref "riscv_vector::get_vlmul(E_V128HFmode)")
(eq_attr "mode" "V256HF") (symbol_ref "riscv_vector::get_vlmul(E_V256HFmode)")
(eq_attr "mode" "V512HF") (symbol_ref "riscv_vector::get_vlmul(E_V512HFmode)")
(eq_attr "mode" "V1024HF") (symbol_ref "riscv_vector::get_vlmul(E_V1024HFmode)")
(eq_attr "mode" "V2048HF") (symbol_ref "riscv_vector::get_vlmul(E_V2048HFmode)")
(eq_attr "mode" "V1SF") (symbol_ref "riscv_vector::get_vlmul(E_V1SFmode)")
(eq_attr "mode" "V2SF") (symbol_ref "riscv_vector::get_vlmul(E_V2SFmode)")
(eq_attr "mode" "V4SF") (symbol_ref "riscv_vector::get_vlmul(E_V4SFmode)")
(eq_attr "mode" "V8SF") (symbol_ref "riscv_vector::get_vlmul(E_V8SFmode)")
(eq_attr "mode" "V16SF") (symbol_ref "riscv_vector::get_vlmul(E_V16SFmode)")
(eq_attr "mode" "V32SF") (symbol_ref "riscv_vector::get_vlmul(E_V32SFmode)")
(eq_attr "mode" "V64SF") (symbol_ref "riscv_vector::get_vlmul(E_V64SFmode)")
(eq_attr "mode" "V128SF") (symbol_ref "riscv_vector::get_vlmul(E_V128SFmode)")
(eq_attr "mode" "V256SF") (symbol_ref "riscv_vector::get_vlmul(E_V256SFmode)")
(eq_attr "mode" "V512SF") (symbol_ref "riscv_vector::get_vlmul(E_V512SFmode)")
(eq_attr "mode" "V1024SF") (symbol_ref "riscv_vector::get_vlmul(E_V1024SFmode)")
(eq_attr "mode" "V1DF") (symbol_ref "riscv_vector::get_vlmul(E_V1DFmode)")
(eq_attr "mode" "V2DF") (symbol_ref "riscv_vector::get_vlmul(E_V2DFmode)")
(eq_attr "mode" "V4DF") (symbol_ref "riscv_vector::get_vlmul(E_V4DFmode)")
(eq_attr "mode" "V8DF") (symbol_ref "riscv_vector::get_vlmul(E_V8DFmode)")
(eq_attr "mode" "V16DF") (symbol_ref "riscv_vector::get_vlmul(E_V16DFmode)")
(eq_attr "mode" "V32DF") (symbol_ref "riscv_vector::get_vlmul(E_V32DFmode)")
(eq_attr "mode" "V64DF") (symbol_ref "riscv_vector::get_vlmul(E_V64DFmode)")
(eq_attr "mode" "V128DF") (symbol_ref "riscv_vector::get_vlmul(E_V128DFmode)")
(eq_attr "mode" "V256DF") (symbol_ref "riscv_vector::get_vlmul(E_V256DFmode)")
(eq_attr "mode" "V512DF") (symbol_ref "riscv_vector::get_vlmul(E_V512DFmode)")]
(const_int INVALID_ATTRIBUTE)))
;; This attribute is valid only for instructions that require the SEW/LMUL ratio.
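;; The ratio is SEW/LMUL.  For example, RVVM2SI has SEW = 32 and LMUL = 2,
;; so the table below gives it a ratio of 16, while RVVMF2SI (LMUL = 1/2)
;; gets 64.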
(define_attr "ratio" ""
(cond [(eq_attr "type" "vimov,vfmov,vldux,vldox,vstux,vstox,\
vialu,vshift,vicmp,vimul,vidiv,vsalu,\
vext,viwalu,viwmul,vicalu,vnshift,\
vimuladd,vimerge,vaalu,vsmul,vsshift,\
vnclip,viminmax,viwmuladd,vmffs,vmsfs,\
vmiota,vmidx,vfalu,vfmul,vfminmax,vfdiv,\
vfwalu,vfwmul,vfsqrt,vfrecp,vfsgnj,vfcmp,\
vfmerge,vfcvtitof,vfcvtftoi,vfwcvtitof,\
vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,\
vfncvtftof,vfmuladd,vfwmuladd,vfclass,vired,\
viwred,vfredu,vfredo,vfwredu,vfwredo,vimovvx,\
vimovxv,vfmovvf,vfmovfv,vslideup,vslidedown,\
vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,vcompress,vlsegdux,vlsegdox,vssegtux,vssegtox")
(const_int INVALID_ATTRIBUTE)
(eq_attr "mode" "RVVM8QI,RVVM1BI") (const_int 1)
(eq_attr "mode" "RVVM4QI,RVVMF2BI") (const_int 2)
(eq_attr "mode" "RVVM2QI,RVVMF4BI") (const_int 4)
(eq_attr "mode" "RVVM1QI,RVVMF8BI") (const_int 8)
(eq_attr "mode" "RVVMF2QI,RVVMF16BI") (const_int 16)
(eq_attr "mode" "RVVMF4QI,RVVMF32BI") (const_int 32)
(eq_attr "mode" "RVVMF8QI,RVVMF64BI") (const_int 64)
(eq_attr "mode" "RVVM8HI") (const_int 2)
(eq_attr "mode" "RVVM4HI") (const_int 4)
(eq_attr "mode" "RVVM2HI") (const_int 8)
(eq_attr "mode" "RVVM1HI") (const_int 16)
(eq_attr "mode" "RVVMF2HI") (const_int 32)
(eq_attr "mode" "RVVMF4HI") (const_int 64)
(eq_attr "mode" "RVVM8HF") (const_int 2)
(eq_attr "mode" "RVVM4HF") (const_int 4)
(eq_attr "mode" "RVVM2HF") (const_int 8)
(eq_attr "mode" "RVVM1HF") (const_int 16)
(eq_attr "mode" "RVVMF2HF") (const_int 32)
(eq_attr "mode" "RVVMF4HF") (const_int 64)
(eq_attr "mode" "RVVM8SI") (const_int 4)
(eq_attr "mode" "RVVM4SI") (const_int 8)
(eq_attr "mode" "RVVM2SI") (const_int 16)
(eq_attr "mode" "RVVM1SI") (const_int 32)
(eq_attr "mode" "RVVMF2SI") (const_int 64)
(eq_attr "mode" "RVVM8SF") (const_int 4)
(eq_attr "mode" "RVVM4SF") (const_int 8)
(eq_attr "mode" "RVVM2SF") (const_int 16)
(eq_attr "mode" "RVVM1SF") (const_int 32)
(eq_attr "mode" "RVVMF2SF") (const_int 64)
(eq_attr "mode" "RVVM8DI") (const_int 8)
(eq_attr "mode" "RVVM4DI") (const_int 16)
(eq_attr "mode" "RVVM2DI") (const_int 32)
(eq_attr "mode" "RVVM1DI") (const_int 64)
(eq_attr "mode" "RVVM8DF") (const_int 8)
(eq_attr "mode" "RVVM4DF") (const_int 16)
(eq_attr "mode" "RVVM2DF") (const_int 32)
(eq_attr "mode" "RVVM1DF") (const_int 64)
(eq_attr "mode" "RVVM1x8QI") (const_int 8)
(eq_attr "mode" "RVVMF2x8QI") (const_int 16)
(eq_attr "mode" "RVVMF4x8QI") (const_int 32)
(eq_attr "mode" "RVVMF8x8QI") (const_int 64)
(eq_attr "mode" "RVVM1x7QI") (const_int 8)
(eq_attr "mode" "RVVMF2x7QI") (const_int 16)
(eq_attr "mode" "RVVMF4x7QI") (const_int 32)
(eq_attr "mode" "RVVMF8x7QI") (const_int 64)
(eq_attr "mode" "RVVM1x6QI") (const_int 8)
(eq_attr "mode" "RVVMF2x6QI") (const_int 16)
(eq_attr "mode" "RVVMF4x6QI") (const_int 32)
(eq_attr "mode" "RVVMF8x6QI") (const_int 64)
(eq_attr "mode" "RVVM1x5QI") (const_int 8)
(eq_attr "mode" "RVVMF2x5QI") (const_int 16)
(eq_attr "mode" "RVVMF4x5QI") (const_int 32)
(eq_attr "mode" "RVVMF8x5QI") (const_int 64)
(eq_attr "mode" "RVVM2x4QI") (const_int 4)
(eq_attr "mode" "RVVM1x4QI") (const_int 8)
(eq_attr "mode" "RVVMF2x4QI") (const_int 16)
(eq_attr "mode" "RVVMF4x4QI") (const_int 32)
(eq_attr "mode" "RVVMF8x4QI") (const_int 64)
(eq_attr "mode" "RVVM2x3QI") (const_int 4)
(eq_attr "mode" "RVVM1x3QI") (const_int 8)
(eq_attr "mode" "RVVMF2x3QI") (const_int 16)
(eq_attr "mode" "RVVMF4x3QI") (const_int 32)
(eq_attr "mode" "RVVMF8x3QI") (const_int 64)
(eq_attr "mode" "RVVM4x2QI") (const_int 2)
(eq_attr "mode" "RVVM2x2QI") (const_int 4)
(eq_attr "mode" "RVVM1x2QI") (const_int 8)
(eq_attr "mode" "RVVMF2x2QI") (const_int 16)
(eq_attr "mode" "RVVMF4x2QI") (const_int 32)
(eq_attr "mode" "RVVMF8x2QI") (const_int 64)
(eq_attr "mode" "RVVM1x8HI") (const_int 16)
(eq_attr "mode" "RVVMF2x8HI") (const_int 32)
(eq_attr "mode" "RVVMF4x8HI") (const_int 64)
(eq_attr "mode" "RVVM1x7HI") (const_int 16)
(eq_attr "mode" "RVVMF2x7HI") (const_int 32)
(eq_attr "mode" "RVVMF4x7HI") (const_int 64)
(eq_attr "mode" "RVVM1x6HI") (const_int 16)
(eq_attr "mode" "RVVMF2x6HI") (const_int 32)
(eq_attr "mode" "RVVMF4x6HI") (const_int 64)
(eq_attr "mode" "RVVM1x5HI") (const_int 16)
(eq_attr "mode" "RVVMF2x5HI") (const_int 32)
(eq_attr "mode" "RVVMF4x5HI") (const_int 64)
(eq_attr "mode" "RVVM2x4HI") (const_int 8)
(eq_attr "mode" "RVVM1x4HI") (const_int 16)
(eq_attr "mode" "RVVMF2x4HI") (const_int 32)
(eq_attr "mode" "RVVMF4x4HI") (const_int 64)
(eq_attr "mode" "RVVM2x3HI") (const_int 8)
(eq_attr "mode" "RVVM1x3HI") (const_int 16)
(eq_attr "mode" "RVVMF2x3HI") (const_int 32)
(eq_attr "mode" "RVVMF4x3HI") (const_int 64)
(eq_attr "mode" "RVVM4x2HI") (const_int 4)
(eq_attr "mode" "RVVM2x2HI") (const_int 8)
(eq_attr "mode" "RVVM1x2HI") (const_int 16)
(eq_attr "mode" "RVVMF2x2HI") (const_int 32)
(eq_attr "mode" "RVVMF4x2HI") (const_int 64)
(eq_attr "mode" "RVVM1x8HF") (const_int 16)
(eq_attr "mode" "RVVMF2x8HF") (const_int 32)
(eq_attr "mode" "RVVMF4x8HF") (const_int 64)
(eq_attr "mode" "RVVM1x7HF") (const_int 16)
(eq_attr "mode" "RVVMF2x7HF") (const_int 32)
(eq_attr "mode" "RVVMF4x7HF") (const_int 64)
(eq_attr "mode" "RVVM1x6HF") (const_int 16)
(eq_attr "mode" "RVVMF2x6HF") (const_int 32)
(eq_attr "mode" "RVVMF4x6HF") (const_int 64)
(eq_attr "mode" "RVVM1x5HF") (const_int 16)
(eq_attr "mode" "RVVMF2x5HF") (const_int 32)
(eq_attr "mode" "RVVMF4x5HF") (const_int 64)
(eq_attr "mode" "RVVM2x4HF") (const_int 8)
(eq_attr "mode" "RVVM1x4HF") (const_int 16)
(eq_attr "mode" "RVVMF2x4HF") (const_int 32)
(eq_attr "mode" "RVVMF4x4HF") (const_int 64)
(eq_attr "mode" "RVVM2x3HF") (const_int 8)
(eq_attr "mode" "RVVM1x3HF") (const_int 16)
(eq_attr "mode" "RVVMF2x3HF") (const_int 32)
(eq_attr "mode" "RVVMF4x3HF") (const_int 64)
(eq_attr "mode" "RVVM4x2HF") (const_int 4)
(eq_attr "mode" "RVVM2x2HF") (const_int 8)
(eq_attr "mode" "RVVM1x2HF") (const_int 16)
(eq_attr "mode" "RVVMF2x2HF") (const_int 32)
(eq_attr "mode" "RVVMF4x2HF") (const_int 64)
(eq_attr "mode" "RVVM1x8SI") (const_int 32)
(eq_attr "mode" "RVVMF2x8SI") (const_int 64)
(eq_attr "mode" "RVVM1x7SI") (const_int 32)
(eq_attr "mode" "RVVMF2x7SI") (const_int 64)
(eq_attr "mode" "RVVM1x6SI") (const_int 32)
(eq_attr "mode" "RVVMF2x6SI") (const_int 64)
(eq_attr "mode" "RVVM1x5SI") (const_int 32)
(eq_attr "mode" "RVVMF2x5SI") (const_int 64)
(eq_attr "mode" "RVVM2x4SI") (const_int 16)
(eq_attr "mode" "RVVM1x4SI") (const_int 32)
(eq_attr "mode" "RVVMF2x4SI") (const_int 64)
(eq_attr "mode" "RVVM2x3SI") (const_int 16)
(eq_attr "mode" "RVVM1x3SI") (const_int 32)
(eq_attr "mode" "RVVMF2x3SI") (const_int 64)
(eq_attr "mode" "RVVM4x2SI") (const_int 8)
(eq_attr "mode" "RVVM2x2SI") (const_int 16)
(eq_attr "mode" "RVVM1x2SI") (const_int 32)
(eq_attr "mode" "RVVMF2x2SI") (const_int 64)
(eq_attr "mode" "RVVM1x8SF") (const_int 32)
(eq_attr "mode" "RVVMF2x8SF") (const_int 64)
(eq_attr "mode" "RVVM1x7SF") (const_int 32)
(eq_attr "mode" "RVVMF2x7SF") (const_int 64)
(eq_attr "mode" "RVVM1x6SF") (const_int 32)
(eq_attr "mode" "RVVMF2x6SF") (const_int 64)
(eq_attr "mode" "RVVM1x5SF") (const_int 32)
(eq_attr "mode" "RVVMF2x5SF") (const_int 64)
(eq_attr "mode" "RVVM2x4SF") (const_int 16)
(eq_attr "mode" "RVVM1x4SF") (const_int 32)
(eq_attr "mode" "RVVMF2x4SF") (const_int 64)
(eq_attr "mode" "RVVM2x3SF") (const_int 16)
(eq_attr "mode" "RVVM1x3SF") (const_int 32)
(eq_attr "mode" "RVVMF2x3SF") (const_int 64)
(eq_attr "mode" "RVVM4x2SF") (const_int 8)
(eq_attr "mode" "RVVM2x2SF") (const_int 16)
(eq_attr "mode" "RVVM1x2SF") (const_int 32)
(eq_attr "mode" "RVVMF2x2SF") (const_int 64)
(eq_attr "mode" "RVVM1x8DI") (const_int 64)
(eq_attr "mode" "RVVM1x7DI") (const_int 64)
(eq_attr "mode" "RVVM1x6DI") (const_int 64)
(eq_attr "mode" "RVVM1x5DI") (const_int 64)
(eq_attr "mode" "RVVM2x4DI") (const_int 32)
(eq_attr "mode" "RVVM1x4DI") (const_int 64)
(eq_attr "mode" "RVVM2x3DI") (const_int 32)
(eq_attr "mode" "RVVM1x3DI") (const_int 64)
(eq_attr "mode" "RVVM4x2DI") (const_int 16)
(eq_attr "mode" "RVVM2x2DI") (const_int 32)
(eq_attr "mode" "RVVM1x2DI") (const_int 64)
(eq_attr "mode" "RVVM1x8DF") (const_int 64)
(eq_attr "mode" "RVVM1x7DF") (const_int 64)
(eq_attr "mode" "RVVM1x6DF") (const_int 64)
(eq_attr "mode" "RVVM1x5DF") (const_int 64)
(eq_attr "mode" "RVVM2x4DF") (const_int 32)
(eq_attr "mode" "RVVM1x4DF") (const_int 64)
(eq_attr "mode" "RVVM2x3DF") (const_int 32)
(eq_attr "mode" "RVVM1x3DF") (const_int 64)
(eq_attr "mode" "RVVM4x2DF") (const_int 16)
(eq_attr "mode" "RVVM2x2DF") (const_int 32)
(eq_attr "mode" "RVVM1x2DF") (const_int 64)
;; VLS modes.
(eq_attr "mode" "V1QI,V1BI") (symbol_ref "riscv_vector::get_ratio(E_V1QImode)")
(eq_attr "mode" "V2QI,V2BI") (symbol_ref "riscv_vector::get_ratio(E_V2QImode)")
(eq_attr "mode" "V4QI,V4BI") (symbol_ref "riscv_vector::get_ratio(E_V4QImode)")
(eq_attr "mode" "V8QI,V8BI") (symbol_ref "riscv_vector::get_ratio(E_V8QImode)")
(eq_attr "mode" "V16QI,V16BI") (symbol_ref "riscv_vector::get_ratio(E_V16QImode)")
(eq_attr "mode" "V32QI,V32BI") (symbol_ref "riscv_vector::get_ratio(E_V32QImode)")
(eq_attr "mode" "V64QI,V64BI") (symbol_ref "riscv_vector::get_ratio(E_V64QImode)")
(eq_attr "mode" "V128QI,V128BI") (symbol_ref "riscv_vector::get_ratio(E_V128QImode)")
(eq_attr "mode" "V256QI,V256BI") (symbol_ref "riscv_vector::get_ratio(E_V256QImode)")
(eq_attr "mode" "V512QI,V512BI") (symbol_ref "riscv_vector::get_ratio(E_V512QImode)")
(eq_attr "mode" "V1024QI,V1024BI") (symbol_ref "riscv_vector::get_ratio(E_V1024QImode)")
(eq_attr "mode" "V2048QI,V2048BI") (symbol_ref "riscv_vector::get_ratio(E_V2048QImode)")
(eq_attr "mode" "V4096QI,V4096BI") (symbol_ref "riscv_vector::get_ratio(E_V4096QImode)")
(eq_attr "mode" "V1HI") (symbol_ref "riscv_vector::get_ratio(E_V1HImode)")
(eq_attr "mode" "V2HI") (symbol_ref "riscv_vector::get_ratio(E_V2HImode)")
(eq_attr "mode" "V4HI") (symbol_ref "riscv_vector::get_ratio(E_V4HImode)")
(eq_attr "mode" "V8HI") (symbol_ref "riscv_vector::get_ratio(E_V8HImode)")
(eq_attr "mode" "V16HI") (symbol_ref "riscv_vector::get_ratio(E_V16HImode)")
(eq_attr "mode" "V32HI") (symbol_ref "riscv_vector::get_ratio(E_V32HImode)")
(eq_attr "mode" "V64HI") (symbol_ref "riscv_vector::get_ratio(E_V64HImode)")
(eq_attr "mode" "V128HI") (symbol_ref "riscv_vector::get_ratio(E_V128HImode)")
(eq_attr "mode" "V256HI") (symbol_ref "riscv_vector::get_ratio(E_V256HImode)")
(eq_attr "mode" "V512HI") (symbol_ref "riscv_vector::get_ratio(E_V512HImode)")
(eq_attr "mode" "V1024HI") (symbol_ref "riscv_vector::get_ratio(E_V1024HImode)")
(eq_attr "mode" "V2048HI") (symbol_ref "riscv_vector::get_ratio(E_V2048HImode)")
(eq_attr "mode" "V1SI") (symbol_ref "riscv_vector::get_ratio(E_V1SImode)")
(eq_attr "mode" "V2SI") (symbol_ref "riscv_vector::get_ratio(E_V2SImode)")
(eq_attr "mode" "V4SI") (symbol_ref "riscv_vector::get_ratio(E_V4SImode)")
(eq_attr "mode" "V8SI") (symbol_ref "riscv_vector::get_ratio(E_V8SImode)")
(eq_attr "mode" "V16SI") (symbol_ref "riscv_vector::get_ratio(E_V16SImode)")
(eq_attr "mode" "V32SI") (symbol_ref "riscv_vector::get_ratio(E_V32SImode)")
(eq_attr "mode" "V64SI") (symbol_ref "riscv_vector::get_ratio(E_V64SImode)")
(eq_attr "mode" "V128SI") (symbol_ref "riscv_vector::get_ratio(E_V128SImode)")
(eq_attr "mode" "V256SI") (symbol_ref "riscv_vector::get_ratio(E_V256SImode)")
(eq_attr "mode" "V512SI") (symbol_ref "riscv_vector::get_ratio(E_V512SImode)")
(eq_attr "mode" "V1024SI") (symbol_ref "riscv_vector::get_ratio(E_V1024SImode)")
(eq_attr "mode" "V1DI") (symbol_ref "riscv_vector::get_ratio(E_V1DImode)")
(eq_attr "mode" "V2DI") (symbol_ref "riscv_vector::get_ratio(E_V2DImode)")
(eq_attr "mode" "V4DI") (symbol_ref "riscv_vector::get_ratio(E_V4DImode)")
(eq_attr "mode" "V8DI") (symbol_ref "riscv_vector::get_ratio(E_V8DImode)")
(eq_attr "mode" "V16DI") (symbol_ref "riscv_vector::get_ratio(E_V16DImode)")
(eq_attr "mode" "V32DI") (symbol_ref "riscv_vector::get_ratio(E_V32DImode)")
(eq_attr "mode" "V64DI") (symbol_ref "riscv_vector::get_ratio(E_V64DImode)")
(eq_attr "mode" "V128DI") (symbol_ref "riscv_vector::get_ratio(E_V128DImode)")
(eq_attr "mode" "V256DI") (symbol_ref "riscv_vector::get_ratio(E_V256DImode)")
(eq_attr "mode" "V512DI") (symbol_ref "riscv_vector::get_ratio(E_V512DImode)")
(eq_attr "mode" "V1HF") (symbol_ref "riscv_vector::get_ratio(E_V1HFmode)")
(eq_attr "mode" "V2HF") (symbol_ref "riscv_vector::get_ratio(E_V2HFmode)")
(eq_attr "mode" "V4HF") (symbol_ref "riscv_vector::get_ratio(E_V4HFmode)")
(eq_attr "mode" "V8HF") (symbol_ref "riscv_vector::get_ratio(E_V8HFmode)")
(eq_attr "mode" "V16HF") (symbol_ref "riscv_vector::get_ratio(E_V16HFmode)")
(eq_attr "mode" "V32HF") (symbol_ref "riscv_vector::get_ratio(E_V32HFmode)")
(eq_attr "mode" "V64HF") (symbol_ref "riscv_vector::get_ratio(E_V64HFmode)")
(eq_attr "mode" "V128HF") (symbol_ref "riscv_vector::get_ratio(E_V128HFmode)")
(eq_attr "mode" "V256HF") (symbol_ref "riscv_vector::get_ratio(E_V256HFmode)")
(eq_attr "mode" "V512HF") (symbol_ref "riscv_vector::get_ratio(E_V512HFmode)")
(eq_attr "mode" "V1024HF") (symbol_ref "riscv_vector::get_ratio(E_V1024HFmode)")
(eq_attr "mode" "V2048HF") (symbol_ref "riscv_vector::get_ratio(E_V2048HFmode)")
(eq_attr "mode" "V1SF") (symbol_ref "riscv_vector::get_ratio(E_V1SFmode)")
(eq_attr "mode" "V2SF") (symbol_ref "riscv_vector::get_ratio(E_V2SFmode)")
(eq_attr "mode" "V4SF") (symbol_ref "riscv_vector::get_ratio(E_V4SFmode)")
(eq_attr "mode" "V8SF") (symbol_ref "riscv_vector::get_ratio(E_V8SFmode)")
(eq_attr "mode" "V16SF") (symbol_ref "riscv_vector::get_ratio(E_V16SFmode)")
(eq_attr "mode" "V32SF") (symbol_ref "riscv_vector::get_ratio(E_V32SFmode)")
(eq_attr "mode" "V64SF") (symbol_ref "riscv_vector::get_ratio(E_V64SFmode)")
(eq_attr "mode" "V128SF") (symbol_ref "riscv_vector::get_ratio(E_V128SFmode)")
(eq_attr "mode" "V256SF") (symbol_ref "riscv_vector::get_ratio(E_V256SFmode)")
(eq_attr "mode" "V512SF") (symbol_ref "riscv_vector::get_ratio(E_V512SFmode)")
(eq_attr "mode" "V1024SF") (symbol_ref "riscv_vector::get_ratio(E_V1024SFmode)")
(eq_attr "mode" "V1DF") (symbol_ref "riscv_vector::get_ratio(E_V1DFmode)")
(eq_attr "mode" "V2DF") (symbol_ref "riscv_vector::get_ratio(E_V2DFmode)")
(eq_attr "mode" "V4DF") (symbol_ref "riscv_vector::get_ratio(E_V4DFmode)")
(eq_attr "mode" "V8DF") (symbol_ref "riscv_vector::get_ratio(E_V8DFmode)")
(eq_attr "mode" "V16DF") (symbol_ref "riscv_vector::get_ratio(E_V16DFmode)")
(eq_attr "mode" "V32DF") (symbol_ref "riscv_vector::get_ratio(E_V32DFmode)")
(eq_attr "mode" "V64DF") (symbol_ref "riscv_vector::get_ratio(E_V64DFmode)")
(eq_attr "mode" "V128DF") (symbol_ref "riscv_vector::get_ratio(E_V128DFmode)")
(eq_attr "mode" "V256DF") (symbol_ref "riscv_vector::get_ratio(E_V256DFmode)")
(eq_attr "mode" "V512DF") (symbol_ref "riscv_vector::get_ratio(E_V512DFmode)")]
(const_int INVALID_ATTRIBUTE)))
;; The index of operand[] to get the merge op.
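;; For example, most unary and binary patterns (type "vialu", "vfalu", ...)
;; carry the merge operand, which supplies the values of tail and
;; masked-off elements, at operands[2]; ternary patterns such as
;; "vimuladd" carry it at operands[5].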
(define_attr "merge_op_idx" ""
(cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox,vicmp,\
vialu,vshift,viminmax,vimul,vidiv,vsalu,vext,viwalu,\
viwmul,vnshift,vaalu,vsmul,vsshift,vnclip,vmsfs,\
vmiota,vmidx,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
vfsqrt,vfrecp,vfsgnj,vfcmp,vfcvtitof,vfcvtftoi,vfwcvtitof,\
vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,\
vired,viwred,vfredu,vfredo,vfwredu,vfwredo,vimovxv,vfmovfv,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,vldff,viwmuladd,vfwmuladd,vlsegde,vlsegds,vlsegdux,vlsegdox,vlsegdff")
(const_int 2)
(eq_attr "type" "vimerge,vfmerge,vcompress")
(const_int 1)
(eq_attr "type" "vimuladd,vfmuladd")
(const_int 5)]
(const_int INVALID_ATTRIBUTE)))
;; The index of operand[] that represents the machine mode of the instruction.
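;; For example, a store pattern ("vstux") takes its mode from the stored
;; value at operands[3], while a widening pattern ("viwalu") takes it
;; from the narrower source at operands[4].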
(define_attr "mode_idx" ""
(cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,vldux,vldox,vldff,vldr,vstr,\
vlsegde,vlsegds,vlsegdux,vlsegdox,vlsegdff,vialu,vext,vicalu,\
vshift,vicmp,viminmax,vimul,vidiv,vimuladd,vimerge,vimov,\
vsalu,vaalu,vsmul,vsshift,vfalu,vfmul,vfdiv,vfmuladd,vfsqrt,vfrecp,\
vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,\
vfcvtitof,vfncvtitof,vfncvtftoi,vfncvtftof,vmalu,vmiota,vmidx,\
vimovxv,vfmovfv,vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,vcompress,vmov,vnclip,vnshift")
(const_int 0)
(eq_attr "type" "vimovvx,vfmovvf")
(const_int 1)
(eq_attr "type" "vssegte,vmpop,vmffs")
(const_int 2)
(eq_attr "type" "vstux,vstox,vssegts,vssegtux,vssegtox,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
vfwcvtftof,vmsfs,vired,viwred,vfredu,vfredo,vfwredu,vfwredo")
(const_int 3)
(eq_attr "type" "viwalu,viwmul,viwmuladd,vfwalu,vfwmul,vfwmuladd")
(const_int 4)]
(const_int INVALID_ATTRIBUTE)))
;; The index of operand[] to get the avl op.
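;; For example, unary patterns such as "vext" carry the AVL at
;; operands[4], while most binary patterns ("vialu", "vfalu", ...) have
;; one more source operand and carry it at operands[5].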
(define_attr "vl_op_idx" ""
(cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vmalu,vsts,vstux,\
vstox,vext,vmsfs,vmiota,vfsqrt,vfrecp,vfcvtitof,vldff,\
vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,\
vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress,\
vlsegde,vssegts,vssegtux,vssegtox,vlsegdff")
(const_int 4)
         ;; If operands[3] of "vlds" is not a vector mode, it is pred_broadcast,
         ;; whereas it is pred_strided_load if operands[3] is a vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(const_int 5)
(const_int 4))
(eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
viwalu,viwmul,vnshift,vimerge,vaalu,vsmul,\
vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
vfsgnj,vfmerge,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
vgather,viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox")
(const_int 5)
(eq_attr "type" "vicmp,vimuladd,vfcmp,vfmuladd")
(const_int 6)
(eq_attr "type" "vmpop,vmffs,vmidx,vssegte")
(const_int 3)]
(const_int INVALID_ATTRIBUTE)))
;; The tail policy op value.
(define_attr "ta" ""
(cond [(eq_attr "type" "vlde,vimov,vfmov,vext,vmiota,vfsqrt,vfrecp,\
vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,\
vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,\
vcompress,vldff,vlsegde,vlsegdff")
(symbol_ref "riscv_vector::get_ta(operands[5])")
         ;; If operands[3] of "vlds" is not a vector mode, it is pred_broadcast,
         ;; whereas it is pred_strided_load if operands[3] is a vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(symbol_ref "riscv_vector::get_ta(operands[6])")
(symbol_ref "riscv_vector::get_ta(operands[5])"))
(eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
viwalu,viwmul,vnshift,vimerge,vaalu,vsmul,\
vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,\
vfwalu,vfwmul,vfsgnj,vfmerge,vired,viwred,vfredu,\
vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\
vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
vlsegds,vlsegdux,vlsegdox")
(symbol_ref "riscv_vector::get_ta(operands[6])")
(eq_attr "type" "vimuladd,vfmuladd")
(symbol_ref "riscv_vector::get_ta(operands[7])")
(eq_attr "type" "vmidx")
(symbol_ref "riscv_vector::get_ta(operands[4])")]
(const_int INVALID_ATTRIBUTE)))
;; The mask policy op value.
(define_attr "ma" ""
(cond [(eq_attr "type" "vlde,vext,vmiota,vfsqrt,vfrecp,vfcvtitof,vfcvtftoi,\
vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,\
vfncvtftof,vfclass,vldff,vlsegde,vlsegdff")
(symbol_ref "riscv_vector::get_ma(operands[6])")
         ;; If operands[3] of "vlds" is not a vector mode, it is pred_broadcast,
         ;; whereas it is pred_strided_load if operands[3] is a vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(symbol_ref "riscv_vector::get_ma(operands[7])")
(symbol_ref "riscv_vector::get_ma(operands[6])"))
(eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
viwalu,viwmul,vnshift,vaalu,vsmul,vsshift,\
vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,\
vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\
vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\
viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox")
(symbol_ref "riscv_vector::get_ma(operands[7])")
(eq_attr "type" "vimuladd,vfmuladd")
(symbol_ref "riscv_vector::get_ma(operands[8])")
(eq_attr "type" "vmsfs,vmidx")
(symbol_ref "riscv_vector::get_ma(operands[5])")]
(const_int INVALID_ATTRIBUTE)))
;; The index of operand[] to get the avl type value.
(define_attr "avl_type_idx" ""
(cond [(eq_attr "type" "vlde,vldff,vste,vimov,vfmov,vext,vimerge,\
vfsqrt,vfrecp,vfmerge,vfcvtitof,vfcvtftoi,vfwcvtitof,\
vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
vfclass,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
vimovxv,vfmovfv,vlsegde,vlsegdff")
(const_int 7)
(eq_attr "type" "vldm,vstm,vmalu,vmalu")
(const_int 5)
         ;; If operands[3] of "vlds" is not a vector mode, it is pred_broadcast,
         ;; whereas it is pred_strided_load if operands[3] is a vector mode.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(const_int 8)
(const_int 7))
(eq_attr "type" "vldux,vldox,vialu,vshift,viminmax,vimul,vidiv,vsalu,\
viwalu,viwmul,vnshift,vaalu,vsmul,vsshift,\
vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
vfsgnj,vfcmp,vslideup,vslidedown,vislide1up,\
vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
vlsegds,vlsegdux,vlsegdox")
(const_int 8)
(eq_attr "type" "vstux,vstox,vssegts,vssegtux,vssegtox")
(const_int 5)
(eq_attr "type" "vimuladd,vfmuladd")
(const_int 9)
(eq_attr "type" "vmsfs,vmidx,vcompress")
(const_int 6)
(eq_attr "type" "vmpop,vmffs,vssegte")
(const_int 4)]
(const_int INVALID_ATTRIBUTE)))
;; Defines the rounding mode of a fixed-point operation.
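;; The four values mirror the VXRM CSR encodings: rnu (round-to-nearest-up),
;; rne (round-to-nearest-even), rdn (round-down, i.e. truncate) and rod
;; (round-to-odd).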
(define_attr "vxrm_mode" "rnu,rne,rdn,rod,none"
(cond [(eq_attr "type" "vaalu,vsmul,vsshift,vnclip")
(cond
[(match_test "INTVAL (operands[9]) == riscv_vector::VXRM_RNU")
(const_string "rnu")
(match_test "INTVAL (operands[9]) == riscv_vector::VXRM_RNE")
(const_string "rne")
(match_test "INTVAL (operands[9]) == riscv_vector::VXRM_RDN")
(const_string "rdn")
(match_test "INTVAL (operands[9]) == riscv_vector::VXRM_ROD")
(const_string "rod")]
(const_string "none"))]
(const_string "none")))
;; Defines the rounding mode of a floating-point operation.
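;; FRM_DYN means the instruction rounds according to the dynamic rounding
;; mode held in the frm CSR; FRM_NONE marks instructions whose result does
;; not depend on frm.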
(define_attr "frm_mode" ""
(cond [(eq_attr "type" "vfalu,vfwalu,vfmul,vfdiv,vfwmul")
(symbol_ref "riscv_vector::FRM_DYN")]
(symbol_ref "riscv_vector::FRM_NONE")))
;; -----------------------------------------------------------------
;; ---- Miscellaneous Operations
;; -----------------------------------------------------------------
(define_insn "@vundefined"
[(set (match_operand:V 0 "register_operand" "=vr")
(unspec:V [(reg:SI X0_REGNUM)] UNSPEC_VUNDEF))]
"TARGET_VECTOR"
""
[(set_attr "type" "vector")])
(define_insn "@vundefined"
[(set (match_operand:VB 0 "register_operand" "=vr")
(unspec:VB [(reg:SI X0_REGNUM)] UNSPEC_VUNDEF))]
"TARGET_VECTOR"
""
[(set_attr "type" "vector")])
(define_insn "@vundefined"
[(set (match_operand:VT 0 "register_operand" "=vr")
(unspec:VT [(reg:SI X0_REGNUM)] UNSPEC_VUNDEF))]
"TARGET_VECTOR"
""
[(set_attr "type" "vector")])
(define_expand "@vreinterpret"
[(set (match_operand:V 0 "register_operand")
(match_operand 1 "vector_any_register_operand"))]
"TARGET_VECTOR"
{
  emit_move_insn (operands[0], gen_lowpart (<MODE>mode, operands[1]));
DONE;
}
)
(define_expand "@vreinterpret"
[(set (match_operand:VB 0 "register_operand")
(match_operand 1 "vector_any_register_operand"))]
"TARGET_VECTOR"
{
  emit_move_insn (operands[0], gen_lowpart (<MODE>mode, operands[1]));
DONE;
}
)
;; This pattern is used to hold the AVL operand for
;; RVV instructions that implicitly use VLMAX AVL.
;; When an RVV instruction implicitly uses a GPR that is ultimately
;; defined by this pattern, it is safe for the VSETVL pass to emit a
;; vsetvl instruction that modifies this register after RA.
;; Case 1:
;; vlmax_avl a5
;; ... (across many blocks)
;; vadd (implicit use a5) ====> emit: vsetvl a5,zero
;; Case 2:
;; vlmax_avl a5
;; ... (across many blocks)
;; mv a6,a5
;; ... (across many blocks)
;; vadd (implicit use a6) ====> emit: vsetvl a6,zero
;; Case 3:
;; vlmax_avl a5
;; ... (across many blocks)
;; store mem,a5 (spill)
;; ... (across many blocks)
;; load a7,mem (spill)
;; ... (across many blocks)
;; vadd (implicit use a7) ====> emit: vsetvl a7,zero
;; All such cases are safe for the VSETVL pass to emit a vsetvl
;; instruction that modifies the AVL operand.
(define_insn "@vlmax_avl"
[(set (match_operand:P 0 "register_operand" "=r")
(unspec:P [(match_operand:P 1 "const_int_operand" "i")] UNSPEC_VLMAX))]
"TARGET_VECTOR"
""
[(set_attr "type" "vsetvl_pre")]
)
;; Set VXRM
(define_insn "vxrmsi"
[(set (reg:SI VXRM_REGNUM)
(match_operand:SI 0 "const_int_operand" "i"))]
"TARGET_VECTOR"
"csrwi\tvxrm,%0"
[(set_attr "type" "wrvxrm")
(set_attr "mode" "SI")])
;; Set FRM
(define_insn "fsrmsi_backup"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(reg:SI FRM_REGNUM))
(set (reg:SI FRM_REGNUM)
(match_operand:SI 1 "reg_or_int_operand" "r,i"))]
"TARGET_VECTOR"
"@
fsrm\t%0,%1
fsrmi\t%0,%1"
[(set_attr "type" "wrfrm,wrfrm")
(set_attr "mode" "SI")]
)
(define_insn "fsrmsi_restore"
[(set (reg:SI FRM_REGNUM)
(match_operand:SI 0 "reg_or_int_operand" "r,i"))]
"TARGET_VECTOR"
"@
fsrm\t%0
fsrmi\t%0"
[(set_attr "type" "wrfrm,wrfrm")
(set_attr "mode" "SI")]
)
;; The volatile fsrmsi restore is used as the exit point of dynamic
;; rounding-mode switching.  It generates a volatile fsrm (e.g. fsrm a5)
;; which won't be eliminated.
(define_insn "fsrmsi_restore_volatile"
[(set (reg:SI FRM_REGNUM)
(unspec_volatile:SI [(match_operand:SI 0 "register_operand" "r")]
UNSPECV_FRM_RESTORE_EXIT))]
"TARGET_VECTOR"
"fsrm\t%0"
[(set_attr "type" "wrfrm")
(set_attr "mode" "SI")]
)
;; Read FRM
(define_insn "frrmsi"
[(set (match_operand:SI 0 "register_operand" "=r")
(reg:SI FRM_REGNUM))]
"TARGET_VECTOR"
"frrm\t%0"
[(set_attr "type" "rdfrm")
(set_attr "mode" "SI")]
)
;; -----------------------------------------------------------------
;; ---- Move Operations
;; -----------------------------------------------------------------
(define_expand "mov"
[(set (match_operand:V 0 "reg_or_mem_operand")
(match_operand:V 1 "general_operand"))]
"TARGET_VECTOR"
{
  /* For a whole register move, we transform the pattern into a format
     that excludes the clobber of a scratch register.
     We include the clobber of a scalar scratch register, which is going
     to be used to emit the vsetvl instruction after reload_completed,
     since we need a vsetvl instruction to set the VL/VTYPE global status
     for fractional vector load/store.
For example:
[(set (match_operand:RVVMF8QI v24)
(match_operand:RVVMF8QI (mem: a4)))
(clobber (scratch:SI a5))]
====>> vsetvl a5,zero,e8,mf8
====>> vle8.v v24,(a4)
Philosophy:
- Clobber a scalar scratch register for each mov.
     - Classify the machine_mode mode = <MODE>mode into 2 classes:
Whole register move and fractional register move.
- Transform and remove scratch clobber register for whole
register move so that we can avoid occupying the scalar
registers.
     - We cannot leave it to TARGET_SECONDARY_RELOAD since it happens
       before spilling.  The clobber scratch is used by spilling fractional
       registers in IRA/LRA, so it's too early.  */
if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
})
;; This pattern is used for code-gen for whole register load/stores.
;; Also applicable for all register moves.
;; Fractional vector modes load/store are not allowed to match this pattern.
;; Mask modes load/store are not allowed to match this pattern.
;; We separate "*mov<mode>" into "*mov<mode>_whole" and "*mov<mode>_fract" because
;; we don't want to include fractional load/store in "*mov<mode>", which would
;; create unexpected patterns in LRA.
;; For example:
;; ira rtl:
;; (insn 20 19 9 2 (set (reg/v:RVVMF4QI 97 v1 [ v1 ])
;; (reg:RVVMF4QI 134 [ _1 ])) "rvv.c":9:22 571 {*movvnx2qi_fract}
;; (nil))
;; When the value of pseudo register 134 of the insn above is discovered
;; already spilled to memory during LRA,
;; LRA will reload this pattern into a memory load instruction pattern.
;; Because RVVMF4QI is a fractional vector, we want LRA to reload this pattern into
;; (insn 20 19 9 2 (parallel [
;; (set (reg:RVVMF4QI 98 v2 [orig:134 _1 ] [134])
;; (mem/c:RVVMF4QI (reg:SI 13 a3 [155]) [1 %sfp+[-2, -2] S[2, 2] A8]))
;; (clobber (reg:SI 14 a4 [149]))])
;; so that we are able to emit a vsetvl instruction using the clobber scratch a4.
;; To let LRA generate the expected pattern, we should exclude fractional vector
;; load/store in "*mov<mode>_whole".  Otherwise, it will reload this pattern into:
;; (insn 20 19 9 2 (set (reg:RVVMF4QI 98 v2 [orig:134 _1 ] [134])
;; (mem/c:RVVMF4QI (reg:SI 13 a3 [155]) [1 %sfp+[-2, -2] S[2, 2] A8])))
;; which is not the pattern we want.
;; According to the facts above, we make "*mov<mode>_whole" include load/store/move for whole
;; vector modes according to '-march' and "*mov<mode>_fract" only include fractional vector modes.
(define_insn "*mov_whole"
[(set (match_operand:V_WHOLE 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:V_WHOLE 1 "reg_or_mem_operand" " m,vr,vr"))]
"TARGET_VECTOR"
"@
vl%m1re.v\t%0,%1
vs%m1r.v\t%1,%0
vmv%m1r.v\t%0,%1"
[(set_attr "type" "vldr,vstr,vmov")
(set_attr "mode" "")])
(define_insn "*mov_fract"
[(set (match_operand:V_FRACT 0 "register_operand" "=vr")
(match_operand:V_FRACT 1 "register_operand" " vr"))]
"TARGET_VECTOR"
"vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "")])
(define_expand "mov"
[(set (match_operand:VB 0 "reg_or_mem_operand")
(match_operand:VB 1 "general_operand"))]
"TARGET_VECTOR"
{
if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
})
(define_insn "*mov"
[(set (match_operand:VB 0 "register_operand" "=vr")
(match_operand:VB 1 "register_operand" " vr"))]
"TARGET_VECTOR"
"vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "")])
(define_expand "@mov_lra"
[(parallel
[(set (match_operand:V_FRACT 0 "reg_or_mem_operand")
(match_operand:V_FRACT 1 "reg_or_mem_operand"))
(clobber (match_scratch:P 2))])]
"TARGET_VECTOR && (lra_in_progress || reload_completed)"
{})
(define_expand "@mov_lra"
[(parallel
[(set (match_operand:VB 0 "reg_or_mem_operand")
(match_operand:VB 1 "reg_or_mem_operand"))
(clobber (match_scratch:P 2))])]
"TARGET_VECTOR && (lra_in_progress || reload_completed)"
{})
(define_insn_and_split "*mov_lra"
[(set (match_operand:V_FRACT 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:V_FRACT 1 "reg_or_mem_operand" " m,vr,vr"))
(clobber (match_scratch:P 2 "=&r,&r,X"))]
"TARGET_VECTOR && (lra_in_progress || reload_completed)"
"#"
"&& reload_completed"
[(const_int 0)]
{
if (REG_P (operands[0]) && REG_P (operands[1]))
emit_insn (gen_rtx_SET (operands[0], operands[1]));
else
{
      riscv_vector::emit_vlmax_vsetvl (<V_FRACT:MODE>mode, operands[2]);
      riscv_vector::emit_vlmax_insn_lra (code_for_pred_mov (<V_FRACT:MODE>mode),
                                         riscv_vector::UNARY_OP, operands, operands[2]);
}
DONE;
}
[(set_attr "type" "vector")]
)
(define_insn_and_split "*mov_lra"
[(set (match_operand:VB 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:VB 1 "reg_or_mem_operand" " m,vr,vr"))
(clobber (match_scratch:P 2 "=&r,&r,X"))]
"TARGET_VECTOR && (lra_in_progress || reload_completed)"
"#"
"&& reload_completed"
[(const_int 0)]
{
if (REG_P (operands[0]) && REG_P (operands[1]))
emit_insn (gen_rtx_SET (operands[0], operands[1]));
else
{
      riscv_vector::emit_vlmax_vsetvl (<VB:MODE>mode, operands[2]);
      riscv_vector::emit_vlmax_insn_lra (code_for_pred_mov (<VB:MODE>mode),
                                         riscv_vector::UNARY_MASK_OP, operands, operands[2]);
}
DONE;
}
[(set_attr "type" "vector")]
)
;; Define tuple mode data movement.
;; operands[2] is used to save the offset of each subpart.
;; operands[3] is used to calculate the address for each subpart.
;; operands[4] is the VL of the vsetvli instruction.
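;; Roughly, a tuple load/store is split after reload into a vsetvli plus
;; one vle/vse per subpart, with the address advanced through operands[3]
;; by the subpart offset saved in operands[2].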
(define_expand "mov"
[(parallel [(set (match_operand:VT 0 "reg_or_mem_operand")
(match_operand:VT 1 "general_operand"))
(clobber (match_dup 2))
(clobber (match_dup 3))
(clobber (match_dup 4))])]
"TARGET_VECTOR"
{
/* Need to force register if mem <- !reg. */
if (MEM_P (operands[0]) && !REG_P (operands[1]))
    operands[1] = force_reg (<MODE>mode, operands[1]);
if (GET_CODE (operands[1]) == CONST_VECTOR)
{
riscv_vector::expand_tuple_move (operands);
DONE;
}
operands[2] = gen_rtx_SCRATCH (Pmode);
operands[3] = gen_rtx_SCRATCH (Pmode);
operands[4] = gen_rtx_SCRATCH (Pmode);
})
(define_insn_and_split "*mov_"
[(set (match_operand:VT 0 "reg_or_mem_operand" "=vr,vr, m")
(match_operand:VT 1 "reg_or_mem_operand" " vr, m,vr"))
(clobber (match_scratch:P 2 "=X,&r,&r"))
(clobber (match_scratch:P 3 "=X,&r,&r"))
(clobber (match_scratch:P 4 "=X,&r,&r"))]
"TARGET_VECTOR"
"#"
"&& reload_completed"
[(const_int 0)]
{
riscv_vector::expand_tuple_move (operands);
DONE;
}
[(set_attr "type" "vmov,vlde,vste")
(set_attr "mode" "")
(set (attr "avl_type_idx") (const_int INVALID_ATTRIBUTE))
(set (attr "mode_idx") (const_int INVALID_ATTRIBUTE))])
;; -----------------------------------------------------------------
;; ---- VLS Move Operations
;; -----------------------------------------------------------------
(define_expand "mov"
[(set (match_operand:VLS_AVL_IMM 0 "reg_or_mem_operand")
(match_operand:VLS_AVL_IMM 1 "general_operand"))]
"TARGET_VECTOR"
{
if (riscv_vector::legitimize_move (operands[0], &operands[1]))
DONE;
})
(define_insn_and_split "*mov"
[(set (match_operand:VLS_AVL_IMM 0 "reg_or_mem_operand" "=vr, m, vr")
(match_operand:VLS_AVL_IMM 1 "reg_or_mem_operand" " m,vr, vr"))]
"TARGET_VECTOR
&& (register_operand (operands[0], mode)
|| register_operand (operands[1], mode))"
"@
#
#
vmv%m1r.v\t%0,%1"
"&& reload_completed
&& (!register_operand (operands[0], mode)
|| !register_operand (operands[1], mode))"
[(const_int 0)]
{
bool ok_p = riscv_vector::legitimize_move (operands[0], &operands[1]);
gcc_assert (ok_p);
DONE;
}
[(set_attr "type" "vmov")]
)
(define_expand "mov"
[(set (match_operand:VLS_AVL_REG 0 "reg_or_mem_operand")
(match_operand:VLS_AVL_REG 1 "general_operand"))]
"TARGET_VECTOR"
{
bool ok_p = riscv_vector::legitimize_move (operands[0], &operands[1]);
gcc_assert (ok_p);
DONE;
})
(define_expand "@mov_lra"
[(parallel
[(set (match_operand:VLS_AVL_REG 0 "reg_or_mem_operand")
(match_operand:VLS_AVL_REG 1 "reg_or_mem_operand"))
(clobber (match_scratch:P 2))])]
"TARGET_VECTOR && (lra_in_progress || reload_completed)"
{})
(define_insn_and_split "*mov_lra"
[(set (match_operand:VLS_AVL_REG 0 "reg_or_mem_operand" "=vr, m,vr")
(match_operand:VLS_AVL_REG 1 "reg_or_mem_operand" " m,vr,vr"))
(clobber (match_scratch:P 2 "=&r,&r,X"))]
"TARGET_VECTOR && (lra_in_progress || reload_completed)
&& (register_operand (operands[0], mode)
|| register_operand (operands[1], mode))"
"#"
"&& reload_completed"
[(const_int 0)]
{
if (REG_P (operands[0]) && REG_P (operands[1]))
emit_insn (gen_rtx_SET (operands[0], operands[1]));
else
{
emit_move_insn (operands[2], gen_int_mode (GET_MODE_NUNITS (<VLS_AVL_REG:MODE>mode),
Pmode));
unsigned insn_flags
= GET_MODE_CLASS (<VLS_AVL_REG:MODE>mode) == MODE_VECTOR_BOOL
? riscv_vector::UNARY_MASK_OP
: riscv_vector::UNARY_OP;
riscv_vector::emit_nonvlmax_insn (code_for_pred_mov (<VLS_AVL_REG:MODE>mode),
insn_flags, operands, operands[2]);
}
DONE;
}
[(set_attr "type" "vmov")]
)
(define_insn "*mov_vls"
[(set (match_operand:VLS 0 "register_operand" "=vr")
(match_operand:VLS 1 "register_operand" " vr"))]
"TARGET_VECTOR"
"vmv%m1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "")])
(define_insn "*mov_vls"
[(set (match_operand:VLSB 0 "register_operand" "=vr")
(match_operand:VLSB 1 "register_operand" " vr"))]
"TARGET_VECTOR"
"vmv1r.v\t%0,%1"
[(set_attr "type" "vmov")
(set_attr "mode" "")])
(define_expand "movmisalign"
[(set (match_operand:VLS 0 "nonimmediate_operand")
(match_operand:VLS 1 "general_operand"))]
"TARGET_VECTOR"
{
/* To support misaligned data movement, we should use a load/store
with the minimum element alignment (QImode elements). */
unsigned int size = GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode));
poly_int64 nunits = GET_MODE_NUNITS (<MODE>mode) * size;
machine_mode mode = riscv_vector::get_vector_mode (QImode, nunits).require ();
operands[0] = gen_lowpart (mode, operands[0]);
operands[1] = gen_lowpart (mode, operands[1]);
if (MEM_P (operands[0]) && !register_operand (operands[1], mode))
operands[1] = force_reg (mode, operands[1]);
riscv_vector::emit_vlmax_insn (code_for_pred_mov (mode), riscv_vector::UNARY_OP, operands);
DONE;
}
)
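;; For example (an illustrative sketch), a misaligned copy of a 16-byte VLS
;; mode such as V4SI is rewritten as a 16-element QImode vector move, which
;; only requires byte alignment:
;; vsetivli zero,16,e8,m1,ta,ma
;; vle8.v v8,(a1)
;; vse8.v v8,(a0)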
;; According to RVV ISA:
;; If an element accessed by a vector memory instruction is not naturally aligned to the size of the element,
;; either the element is transferred successfully or an address misaligned exception is raised on that element.
(define_expand "movmisalign"
[(set (match_operand:V 0 "nonimmediate_operand")
(match_operand:V 1 "general_operand"))]
"TARGET_VECTOR && TARGET_VECTOR_MISALIGN_SUPPORTED"
{
emit_move_insn (operands[0], operands[1]);
DONE;
}
)
;; -----------------------------------------------------------------
;; ---- Duplicate Operations
;; -----------------------------------------------------------------
(define_expand "vec_duplicate"
[(set (match_operand:V_VLS 0 "register_operand")
(vec_duplicate:V_VLS
(match_operand: 1 "direct_broadcast_operand")))]
"TARGET_VECTOR"
{
/* Early expand DImode broadcast in RV32 system to avoid RA reload
generating (set (reg) (vec_duplicate:DI)). */
if (maybe_gt (GET_MODE_SIZE (<VEL>mode), GET_MODE_SIZE (Pmode)))
{
riscv_vector::emit_vlmax_insn (code_for_pred_broadcast (<MODE>mode),
riscv_vector::UNARY_OP, operands);
DONE;
}
/* Otherwise, allow it to fall into the general vec_duplicate pattern,
which allows us to have the vv->vx combine optimization in a later pass. */
})
;; According to GCC internal:
;; This pattern only handles duplicates of non-constant inputs.
;; Constant vectors go through the mov<mode> pattern instead.
;; So "direct_broadcast_operand" can only be mem or reg, no CONSTANT.
(define_insn_and_split "*vec_duplicate"
[(set (match_operand:V_VLS 0 "register_operand")
(vec_duplicate:V_VLS
(match_operand:<VEL> 1 "direct_broadcast_operand")))]
"TARGET_VECTOR && can_create_pseudo_p ()"
"#"
"&& 1"
[(const_int 0)]
{
riscv_vector::emit_vlmax_insn (code_for_pred_broadcast (<MODE>mode),
riscv_vector::UNARY_OP, operands);
DONE;
}
[(set_attr "type" "vector")]
)
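;; For example (an illustrative sketch), duplicating a non-constant int32_t
;; scalar held in a1 ends up as a VLMAX broadcast:
;; vsetvli a5,zero,e32,m1,ta,ma
;; vmv.v.x v8,a1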
;; -----------------------------------------------------------------
;; ---- 6. Configuration-Setting Instructions
;; -----------------------------------------------------------------
;; Includes:
;; - 6.1 vsetvli/vsetivli/vsetvl instructions
;; -----------------------------------------------------------------
;; We don't define vsetvli as unspec_volatile, which would give it side effects.
;; This instruction can be scheduled by the instruction scheduler.
;; This means these instructions will be deleted when
;; no following instruction uses vl or vtype.
;; rd | rs1 | AVL value | Effect on vl
;; - | !x0 | x[rs1] | Normal stripmining
;; !x0 | x0 | ~0 | Set vl to VLMAX
;; operands[0]: VL.
;; operands[1]: AVL.
;; operands[2]: SEW
;; operands[3]: LMUL
;; operands[4]: Tail policy 0 or 1 (undisturbed/agnostic)
;; operands[5]: Mask policy 0 or 1 (undisturbed/agnostic)
;; We define 2 types of "vsetvl*" instruction patterns:
;; - "@vsetvl" is a parallel format which has side effects.
;; - "@vsetvl_no_side_effects" has no side effects.
;; - "@vsetvl" is used by "vsetvl" intrinsics and "insert-vsetvl" PASS.
;; - "@vsetvl_no_side_effects" is used by GCC standard patterns.
;; - "@vsetvl" includes VL/VTYPE global registers status (define set)
;; and each RVV instruction includes VL/VTYPE global registers status (use)
;; so that we can guarantee each RVV instruction can execute with correct
;; VL/VTYPE global registers status after "insert-vsetvl" PASS.
;; - "@vsetvl_no_side_effects" has no side effects and excludes VL/VTYPE
;; global registers status (define set). It's only used by GCC standard pattern
;; expansion. For example: "mov" pattern for fractional vector modes which
;; need to set VL/VTYPE. Then we could manually call this pattern to gain benefits
;; from the optimization of each GCC internal PASS.
;; 1. void foo (float *in, float *out)
;; {
;; vfloat32mf2_t v = *(vfloat32mf2_t*)in;
;; *(vfloat32mf2_t*)out = v;
;; }
;; We could eliminate the second "vsetvl" by calling "@vsetvl<mode>_no_side_effects".
;;
;; "@vsetvl": ;; "@vsetvl_no_side_effects":
;; vsetvli a4,zero,e32,mf2,ta,ma ;; vsetvli a4,zero,e32,mf2,ta,ma
;; vle32.v v24,(a0) ;; vle32.v v24,(a0)
;; vsetvli a4,zero,e32,mf2,ta,ma ;; --
;; vse32.v v24,(a1) ;; vse32.v v24,(a1)
;; ret ;; ret
;; 2. void foo (int8_t *in, int8_t *out, int M)
;; {
;; for (int i = 0; i < M; i++){
;; vint8mf2_t v = *(vint8mf2_t*)(in + i);
;; *(vint8mf2_t*)(out + i) = v;
;; }
;; }
;;
;; Hoist "vsetvl" instruction in LICM:
;; "@vsetvl": ;; "@vsetvl_no_side_effects":
;; - ;; vsetvli a4,zero,e32,mf2,ta,ma
;; LOOP: ;; LOOP:
;; vsetvli a4,zero,e32,mf2,ta,ma ;; -
;; vle32.v v24,(a0) ;; vle32.v v24,(a0)
;; vsetvli a4,zero,e32,mf2,ta,ma ;; -
;; vse32.v v24,(a1) ;; vse32.v v24,(a1)
;; However, it may produce wrong codegen if we exclude VL/VTYPE in "vsetvl".
;; 3. void foo (int8_t *in, int8_t *out, int32_t *in2, int32_t *out2, int M)
;; {
;; for (int i = 0; i < M; i++){
;; vint8mf2_t v = *(vint8mf2_t*)(in + i);
;; vint32mf2_t v2 = *(vint32mf2_t*)(in + i + i);
;; *(vint8mf2_t*)(out + i) = v;
;; *(vint32mf2_t*)(out + i + i) = v2;
;; }
;; }
;;
;; vsetvli a6,zero,e8,mf2,ta,ma
;; vsetvli a2,zero,e32,mf2,ta,ma
;; LOOP:
;; vle8.v v25,(a0)
;; vle32.v v24,(a5)
;; addi a0,a0,1
;; vse8.v v25,(a1)
;; vse32.v v24,(a3)
;;
;; Both vle8.v and vle32.v are using the wrong VL/VTYPE status.
;; We leave it to the "insert-vsetvl" PASS to correct this situation.
;; The "insert-vsetvl" PASS mechanism:
;; 1. Before "insert-vsetvl" PASS, only RVV instructions are generated
;; by GCC standard pattern expansion has the corresponding "vsetvl".
;; We exploit each GCC internal optimization pass to optimize the "vsetvl".
;; 2. Correct the VL/VTYPE status for each GCC standard pattern RVV instructions.
;; Insert vsetvl for each RVV instructions that has no VL/VTYPE status if necessary.
;; For example: RVV intrinsics.
;; 3. Optimize "vsetvl" instructions.
(define_insn "@vsetvl"
[(set (match_operand:P 0 "register_operand" "=r")
(unspec:P [(match_operand:P 1 "csr_operand" "rK")
(match_operand 2 "const_int_operand" "i")
(match_operand 3 "const_int_operand" "i")
(match_operand 4 "const_int_operand" "i")
(match_operand 5 "const_int_operand" "i")] UNSPEC_VSETVL))
(set (reg:SI VL_REGNUM)
(unspec:SI [(match_dup 1)
(match_dup 2)
(match_dup 3)] UNSPEC_VSETVL))
(set (reg:SI VTYPE_REGNUM)
(unspec:SI [(match_dup 2)
(match_dup 3)
(match_dup 4)
(match_dup 5)] UNSPEC_VSETVL))]
"TARGET_VECTOR"
"vset%i1vli\t%0,%1,e%2,%m3,t%p4,m%p5"
[(set_attr "type" "vsetvl")
(set_attr "mode" "")
(set (attr "sew") (symbol_ref "INTVAL (operands[2])"))
(set (attr "vlmul") (symbol_ref "INTVAL (operands[3])"))
(set (attr "ta") (symbol_ref "INTVAL (operands[4])"))
(set (attr "ma") (symbol_ref "INTVAL (operands[5])"))])
;; vsetvl zero,zero,vtype instruction.
;; This pattern has no side effects and does not set the X0 register.
(define_insn "vsetvl_vtype_change_only"
[(set (reg:SI VTYPE_REGNUM)
(unspec:SI
[(match_operand 0 "const_int_operand" "i")
(match_operand 1 "const_int_operand" "i")
(match_operand 2 "const_int_operand" "i")
(match_operand 3 "const_int_operand" "i")] UNSPEC_VSETVL))]
"TARGET_VECTOR"
"vsetvli\tzero,zero,e%0,%m1,t%p2,m%p3"
[(set_attr "type" "vsetvl")
(set_attr "mode" "SI")
(set (attr "sew") (symbol_ref "INTVAL (operands[0])"))
(set (attr "vlmul") (symbol_ref "INTVAL (operands[1])"))
(set (attr "ta") (symbol_ref "INTVAL (operands[2])"))
(set (attr "ma") (symbol_ref "INTVAL (operands[3])"))])
;; vsetvl zero,rs1,vtype instruction.
;; We need this pattern because we should avoid setting the X0 register
;; in the vsetvl instruction pattern.
(define_insn "@vsetvl_discard_result"
[(set (reg:SI VL_REGNUM)
(unspec:SI [(match_operand:P 0 "csr_operand" "rK")
(match_operand 1 "const_int_operand" "i")
(match_operand 2 "const_int_operand" "i")] UNSPEC_VSETVL))
(set (reg:SI VTYPE_REGNUM)
(unspec:SI [(match_dup 1)
(match_dup 2)
(match_operand 3 "const_int_operand" "i")
(match_operand 4 "const_int_operand" "i")] UNSPEC_VSETVL))]
"TARGET_VECTOR"
"vset%i0vli\tzero,%0,e%1,%m2,t%p3,m%p4"
[(set_attr "type" "vsetvl")
(set_attr "mode" "")
(set (attr "sew") (symbol_ref "INTVAL (operands[1])"))
(set (attr "vlmul") (symbol_ref "INTVAL (operands[2])"))
(set (attr "ta") (symbol_ref "INTVAL (operands[3])"))
(set (attr "ma") (symbol_ref "INTVAL (operands[4])"))])
;; It's emitted by the vsetvl/vsetvlmax intrinsics with no side effects.
;; Since we have many optimization passes from "expand" to "reload_completed",
;; such a pattern allows us to gain the benefits of these optimizations.
(define_insn_and_split "@vsetvl_no_side_effects"
[(set (match_operand:P 0 "register_operand" "=r")
(unspec:P [(match_operand:P 1 "csr_operand" "rK")
(match_operand 2 "const_int_operand" "i")
(match_operand 3 "const_int_operand" "i")
(match_operand 4 "const_int_operand" "i")
(match_operand 5 "const_int_operand" "i")] UNSPEC_VSETVL))]
"TARGET_VECTOR"
"#"
"&& epilogue_completed"
[(parallel
[(set (match_dup 0)
(unspec:P [(match_dup 1) (match_dup 2) (match_dup 3)
(match_dup 4) (match_dup 5)] UNSPEC_VSETVL))
(set (reg:SI VL_REGNUM)
(unspec:SI [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_VSETVL))
(set (reg:SI VTYPE_REGNUM)
(unspec:SI [(match_dup 2) (match_dup 3) (match_dup 4)
(match_dup 5)] UNSPEC_VSETVL))])]
""
[(set_attr "type" "vsetvl")
(set_attr "mode" "SI")])
;; This pattern is used to combine the two insns below and then further remove
;; unnecessary sign_extend operations:
;; (set (reg:DI 134 [ _1 ])
;; (unspec:DI [
;; (const_int 19 [0x13])
;; (const_int 8 [0x8])
;; (const_int 5 [0x5])
;; (const_int 2 [0x2]) repeated x2
;; ] UNSPEC_VSETVL))
;; (set (reg/v:DI 135 [ ])
;; (sign_extend:DI (subreg:SI (reg:DI 134 [ _1 ]) 0)))
;;
;; The reason we can remove the sign_extend is that currently the vl value
;; returned by the vsetvl instruction ranges from 0 to 65536 (uint16_t), so
;; bits 17 to 63 (including bit 31) are always 0 and there is no change after
;; sign_extend. Note that for HI and QI modes we cannot do this.
;; Of course, if the range of values returned by vsetvl later expands
;; to 32 bits, then this combine pattern needs to be removed. But that could be
;; a long time from now.
(define_insn_and_split "*vsetvldi_no_side_effects_si_extend"
[(set (match_operand:DI 0 "register_operand")
(sign_extend:DI
(subreg:SI
(unspec:DI [(match_operand:P 1 "csr_operand")
(match_operand 2 "const_int_operand")
(match_operand 3 "const_int_operand")
(match_operand 4 "const_int_operand")
(match_operand 5 "const_int_operand")] UNSPEC_VSETVL) 0)))]
"TARGET_VECTOR && TARGET_64BIT"
"#"
"&& 1"
[(set (match_dup 0)
(unspec:DI [(match_dup 1)
(match_dup 2)
(match_dup 3)
(match_dup 4)
(match_dup 5)] UNSPEC_VSETVL))]
""
[(set_attr "type" "vsetvl")
(set_attr "mode" "SI")])
;; RVV machine description matching format
;; (define_insn ""
;; [(set (match_operand:MODE 0)
;; (if_then_else:MODE
;; (unspec:<VM>
;; [(match_operand:<VM> 1 "vector_mask_operand")
;; (match_operand N + 4 "vector_length_operand")
;; (match_operand N + 5 "const_int_operand")
;; (match_operand N + 6 "const_int_operand")
;; (reg:SI VL_REGNUM)
;; (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
;; (instruction operation:MODE
;; (match_operand 3
;; (match_operand 4
;; (match_operand 5
;; ................
;; (match_operand N + 3)
;; (match_operand:MODE 2 "vector_reg_or_const0_operand")))]
;;
;; (unspec:<VM> [........] UNSPEC_VPREDICATE) is a predicate wrapper.
;; It includes the mask predicate && the length predicate && the vector policy.
;; -------------------------------------------------------------------------------
;; ---- Predicated Mov
;; -------------------------------------------------------------------------------
;; Includes:
;; - 7.4. Vector Unit-Stride Instructions
;; - 11.15 Vector Integer Merge Instructions
;; - 11.16 Vector Integer Move Instructions
;; - 13.16 Vector Floating-Point Move Instruction
;; - 15.1 Vector Mask-Register Logical Instructions
;; -------------------------------------------------------------------------------
;; vle.v/vse.v/vmv.v.v.
;; For vle.v/vmv.v.v, we may need merge and mask operands.
;; For vse.v, we don't need a merge operand, so it should always match "vu".
;; constraint alternatives 0 ~ 2 match vle.v.
;; constraint alternative 3 matches vse.v.
;; constraint alternatives 4 ~ 5 match vmv.v.v.
;; If operand 3 is a const_vector, then it is left to the pred_broadcast patterns.
(define_expand "@pred_mov"
[(set (match_operand:V_VLS 0 "nonimmediate_operand")
(if_then_else:V_VLS
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand")
(match_operand 4 "vector_length_operand")
(match_operand 5 "const_int_operand")
(match_operand 6 "const_int_operand")
(match_operand 7 "const_int_operand")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(match_operand:V_VLS 3 "vector_move_operand")
(match_operand:V_VLS 2 "vector_merge_operand")))]
"TARGET_VECTOR"
{})
;; vle.v/vse.v,vmv.v.v
(define_insn_and_split "*pred_mov"
[(set (match_operand:V_VLS 0 "nonimmediate_operand" "=vr, vr, vd, m, vr, vr")
(if_then_else:V_VLS
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm, vmWc1, Wc1, Wc1")
(match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
(match_operand 5 "const_int_operand" " i, i, i, i, i, i")
(match_operand 6 "const_int_operand" " i, i, i, i, i, i")
(match_operand 7 "const_int_operand" " i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(match_operand:V_VLS 3 "reg_or_mem_operand" " m, m, m, vr, vr, vr")
(match_operand:V_VLS 2 "vector_merge_operand" " 0, vu, vu, vu, vu, 0")))]
"(TARGET_VECTOR
&& (register_operand (operands[0], mode)
|| register_operand (operands[3], mode)))"
"@
vle.v\t%0,%3%p1
vle.v\t%0,%3
vle.v\t%0,%3,%1.t
vse.v\t%3,%0%p1
vmv.v.v\t%0,%3
vmv.v.v\t%0,%3"
"&& register_operand (operands[0], mode)
&& register_operand (operands[3], mode)
&& satisfies_constraint_vu (operands[2])
&& INTVAL (operands[7]) == riscv_vector::VLMAX"
[(set (match_dup 0) (match_dup 3))]
""
[(set_attr "type" "vlde,vlde,vlde,vste,vimov,vimov")
(set_attr "mode" "")])
;; Dedicated pattern for the vse.v instruction, since we can't reuse the pred_mov
;; pattern to include a memory operand as input, which would produce inferior codegen.
(define_insn "@pred_store"
[(set (match_operand:V 0 "memory_operand" "+m")
(if_then_else:V
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
(match_operand 3 "vector_length_operand" " rK")
(match_operand 4 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(match_operand:V 2 "register_operand" " vr")
(match_dup 0)))]
"TARGET_VECTOR"
"vse.v\t%2,%0%p1"
[(set_attr "type" "vste")
(set_attr "mode" "")
(set (attr "avl_type_idx") (const_int 4))
(set_attr "vl_op_idx" "3")])
;; vlm.v/vsm.v/vmmv.m/vmclr.m/vmset.m.
;; constraint alternative 0 matches vlm.v.
;; constraint alternative 1 matches vsm.v.
;; constraint alternative 2 matches vmmv.m.
;; constraint alternative 3 matches vmclr.m.
;; constraint alternative 4 matches vmset.m.
(define_insn_and_split "@pred_mov"
[(set (match_operand:VB_VLS 0 "nonimmediate_operand" "=vr, m, vr, vr, vr")
(if_then_else:VB_VLS
(unspec:VB_VLS
[(match_operand:VB_VLS 1 "vector_all_trues_mask_operand" "Wc1, Wc1, Wc1, Wc1, Wc1")
(match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK")
(match_operand 5 "const_int_operand" " i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(match_operand:VB_VLS 3 "vector_move_operand" " m, vr, vr, Wc0, Wc1")
(match_operand:VB_VLS 2 "vector_undef_operand" " vu, vu, vu, vu, vu")))]
"TARGET_VECTOR"
"@
vlm.v\t%0,%3
vsm.v\t%3,%0
vmmv.m\t%0,%3
vmclr.m\t%0
vmset.m\t%0"
"&& register_operand (operands[0], mode)
&& register_operand (operands[3], mode)
&& INTVAL (operands[5]) == riscv_vector::VLMAX"
[(set (match_dup 0) (match_dup 3))]
""
[(set_attr "type" "vldm,vstm,vmalu,vmalu,vmalu")
(set_attr "mode" "")])
;; Dedicated pattern for the vsm.v instruction, since we can't reuse the pred_mov
;; pattern to include a memory operand as input, which would produce inferior codegen.
(define_insn "@pred_store"
[(set (match_operand:VB 0 "memory_operand" "+m")
(if_then_else:VB
(unspec:VB
[(match_operand:VB 1 "vector_all_trues_mask_operand" "Wc1")
(match_operand 3 "vector_length_operand" " rK")
(match_operand 4 "const_int_operand" " i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(match_operand:VB 2 "register_operand" " vr")
(match_dup 0)))]
"TARGET_VECTOR"
"vsm.v\t%2,%0"
[(set_attr "type" "vstm")
(set_attr "mode" "")
(set (attr "avl_type_idx") (const_int 4))
(set_attr "vl_op_idx" "3")])
(define_insn "@pred_merge"
[(set (match_operand:V_VLS 0 "register_operand" "=vd,vd,vd,vd")
(if_then_else:V_VLS
(unspec:<VM>
[(match_operand 5 "vector_length_operand" " rK,rK,rK,rK")
(match_operand 6 "const_int_operand" " i, i, i, i")
(match_operand 7 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_merge:V_VLS
(match_operand:V_VLS 3 "vector_arith_operand" " vr,vr,vi,vi")
(match_operand:V_VLS 2 "register_operand" " vr,vr,vr,vr")
(match_operand:<VM> 4 "register_operand" " vm,vm,vm,vm"))
(match_operand:V_VLS 1 "vector_merge_operand" " vu, 0,vu, 0")))]
"TARGET_VECTOR"
"vmerge.v%o3m\t%0,%2,%v3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "")])
(define_insn "@pred_merge_scalar"
[(set (match_operand:V_VLSI_QHS 0 "register_operand" "=vd,vd")
(if_then_else:V_VLSI_QHS
(unspec:<VM>
[(match_operand 5 "vector_length_operand" " rK,rK")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_merge:V_VLSI_QHS
(vec_duplicate:V_VLSI_QHS
(match_operand:<VEL> 3 "register_operand" " r, r"))
(match_operand:V_VLSI_QHS 2 "register_operand" " vr,vr")
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSI_QHS 1 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vmerge.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "")])
(define_expand "@pred_merge_scalar"
[(set (match_operand:V_VLSI_D 0 "register_operand")
(if_then_else:V_VLSI_D
(unspec:<VM>
[(match_operand 5 "vector_length_operand")
(match_operand 6 "const_int_operand")
(match_operand 7 "const_int_operand")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_merge:V_VLSI_D
(vec_duplicate:V_VLSI_D
(match_operand:<VEL> 3 "reg_or_int_operand"))
(match_operand:V_VLSI_D 2 "register_operand")
(match_operand:<VM> 4 "register_operand"))
(match_operand:V_VLSI_D 1 "vector_merge_operand")))]
"TARGET_VECTOR"
{
if (riscv_vector::sew64_scalar_helper (
operands,
/* scalar op */&operands[3],
/* vl */operands[5],
<MODE>mode,
riscv_vector::simm5_p (operands[3]),
[] (rtx *operands, rtx broadcast_scalar) {
emit_insn (gen_pred_merge (<MODE>mode, operands[0], operands[1],
operands[2], broadcast_scalar, operands[4], operands[5],
operands[6], operands[7]));
},
(riscv_vector::avl_type) INTVAL (operands[7])))
DONE;
})
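;; On RV32, a 64-bit scalar does not fit in one x-register, so (as an
;; illustrative sketch) sew64_scalar_helper either keeps a simm5 immediate
;; or broadcasts the scalar first and falls back to vmerge.vvm:
;; vd = __riscv_vmerge_vxm_i64m1 (vs2, x, selector, vl);
;; RV64 ==> vmerge.vxm v8,v9,a0,v0
;; RV32 ==> broadcast x (e.g. through memory with vlse64.v), then vmerge.vvm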
(define_insn "*pred_merge_scalar"
[(set (match_operand:V_VLSI_D 0 "register_operand" "=vd,vd")
(if_then_else:V_VLSI_D
(unspec:<VM>
[(match_operand 5 "vector_length_operand" " rK,rK")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_merge:V_VLSI_D
(vec_duplicate:V_VLSI_D
(match_operand:<VEL> 3 "register_operand" " r, r"))
(match_operand:V_VLSI_D 2 "register_operand" " vr,vr")
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSI_D 1 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vmerge.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "")])
(define_insn "*pred_merge_extended_scalar"
[(set (match_operand:V_VLSI_D 0 "register_operand" "=vd,vd")
(if_then_else:V_VLSI_D
(unspec:<VM>
[(match_operand 5 "vector_length_operand" " rK,rK")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_merge:V_VLSI_D
(vec_duplicate:V_VLSI_D
(sign_extend:<VEL>
(match_operand:<VSUBEL> 3 "register_operand" " r, r")))
(match_operand:V_VLSI_D 2 "register_operand" " vr,vr")
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSI_D 1 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vmerge.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "")])
;; -------------------------------------------------------------------------------
;; ---- Predicated Broadcast
;; -------------------------------------------------------------------------------
;; Includes:
;; - 7.5. Vector Strided Instructions (zero stride)
;; - 11.16 Vector Integer Move Instructions (vmv.v.x)
;; - 13.16 Vector Floating-Point Move Instruction (vfmv.v.f)
;; - 16.1 Integer Scalar Move Instructions (vmv.s.x)
;; - 16.2 Floating-Point Scalar Move Instructions (vfmv.s.f)
;; -------------------------------------------------------------------------------
;; According to the RVV ISA, a vector-scalar instruction doesn't support an
;; operand fetched from 2 consecutive registers, so we should use vlse.v,
;; which is a memory access, to broadcast a DImode scalar into a vector.
;;
;; Since the optimization flow in GCC is as follows:
;; expand --> LICM (loop-invariant motion) --> split.
;; To benefit from LICM, we postpone the generation of vlse.v to the split
;; stage, since a memory access instruction can not be hoisted by LICM.
(define_expand "@pred_broadcast"
[(set (match_operand:V_VLS 0 "register_operand")
(if_then_else:V_VLS
(unspec:<VM>
[(match_operand:<VM> 1 "vector_broadcast_mask_operand")
(match_operand 4 "vector_length_operand")
(match_operand 5 "const_int_operand")
(match_operand 6 "const_int_operand")
(match_operand 7 "const_int_operand")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_duplicate:V_VLS
(match_operand:<VEL> 3 "direct_broadcast_operand"))
(match_operand:V_VLS 2 "vector_merge_operand")))]
"TARGET_VECTOR"
{
/* Handle the vmv.s.x instruction (Wb1 mask) whose scalar is a memory operand. */
if (satisfies_constraint_Wdm (operands[3]))
{
if (satisfies_constraint_Wb1 (operands[1]))
{
/* Case 1: vmv.s.x (TA, x == memory) ==> vlse.v (TA) */
if (satisfies_constraint_vu (operands[2]))
operands[1] = CONSTM1_RTX (<VM>mode);
else if (GET_MODE_BITSIZE (<VEL>mode) > GET_MODE_BITSIZE (Pmode))
{
/* Case 2: vmv.s.x (TU, x == memory) ==>
vl = 0 or 1; + vlse.v (TU) in RV32 system */
operands[4] = riscv_vector::gen_avl_for_scalar_move (operands[4]);
operands[1] = CONSTM1_RTX (<VM>mode);
}
else
/* Case 3: load x (memory) to register. */
operands[3] = force_reg (<VEL>mode, operands[3]);
}
}
else if (GET_MODE_BITSIZE (<VEL>mode) > GET_MODE_BITSIZE (Pmode)
&& (immediate_operand (operands[3], Pmode)
|| (CONST_POLY_INT_P (operands[3])
&& known_ge (rtx_to_poly_int64 (operands[3]), 0U)
&& known_le (rtx_to_poly_int64 (operands[3]), GET_MODE_SIZE (mode)))))
{
rtx tmp = gen_reg_rtx (Pmode);
poly_int64 value = rtx_to_poly_int64 (operands[3]);
emit_move_insn (tmp, gen_int_mode (value, Pmode));
operands[3] = gen_rtx_SIGN_EXTEND (<VEL>mode, tmp);
}
else
operands[3] = force_reg (<VEL>mode, operands[3]);
})
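;; For example (an illustrative sketch, RV32 with SEW = 64): the scalar
;; cannot live in one x-register, so the broadcast goes through memory:
;; vd = __riscv_vmv_v_x_i64m1 (x, vl);
;; ==> sw a0,8(sp)
;; sw a1,12(sp)
;; addi a5,sp,8
;; vsetvli zero,a2,e64,m1,ta,ma
;; vlse64.v v8,(a5),zero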
(define_insn_and_split "*pred_broadcast"
[(set (match_operand:V_VLSI 0 "register_operand" "=vr, vr, vd, vd, vr, vr, vr, vr")
(if_then_else:V_VLSI
(unspec:<VM>
[(match_operand:<VM> 1 "vector_broadcast_mask_operand" "Wc1,Wc1, vm, vm,Wc1,Wc1,Wb1,Wb1")
(match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
(match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i")
(match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
(match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_duplicate:V_VLSI
(match_operand:<VEL> 3 "direct_broadcast_operand" " r, r,Wdm,Wdm,Wdm,Wdm, r, r"))
(match_operand:V_VLSI 2 "vector_merge_operand" "vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"@
vmv.v.x\t%0,%3
vmv.v.x\t%0,%3
vlse.v\t%0,%3,zero,%1.t
vlse.v\t%0,%3,zero,%1.t
vlse.v\t%0,%3,zero
vlse.v\t%0,%3,zero
vmv.s.x\t%0,%3
vmv.s.x\t%0,%3"
"(register_operand (operands[3], mode)
|| CONST_POLY_INT_P (operands[3]))
&& GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (Pmode)"
[(set (match_dup 0)
(if_then_else:V_VLSI (unspec:<VM> [(match_dup 1) (match_dup 4)
(match_dup 5) (match_dup 6) (match_dup 7)
(reg:SI VL_REGNUM) (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_duplicate:V_VLSI (match_dup 3))
(match_dup 2)))]
{
gcc_assert (can_create_pseudo_p ());
if (CONST_POLY_INT_P (operands[3]))
{
rtx tmp = gen_reg_rtx (<VEL>mode);
emit_move_insn (tmp, operands[3]);
operands[3] = tmp;
}
rtx m = assign_stack_local (<VEL>mode, GET_MODE_SIZE (<VEL>mode),
GET_MODE_ALIGNMENT (<VEL>mode));
m = validize_mem (m);
emit_move_insn (m, operands[3]);
m = gen_rtx_MEM (<VEL>mode, force_reg (Pmode, XEXP (m, 0)));
operands[3] = m;
/* For SEW = 64 in RV32 system, we expand vmv.s.x:
andi a2,a2,1
vsetvl zero,a2,e64
vlse64.v */
if (satisfies_constraint_Wb1 (operands[1]))
{
operands[4] = riscv_vector::gen_avl_for_scalar_move (operands[4]);
operands[1] = CONSTM1_RTX (<VM>mode);
}
}
[(set_attr "type" "vimov,vimov,vlds,vlds,vlds,vlds,vimovxv,vimovxv")
(set_attr "mode" "")])
(define_insn "*pred_broadcast"
[(set (match_operand:V_VLSF_ZVFHMIN 0 "register_operand" "=vr, vr, vr, vr, vr, vr, vr, vr")
(if_then_else:V_VLSF_ZVFHMIN
(unspec:<VM>
[(match_operand:<VM> 1 "vector_broadcast_mask_operand" "Wc1,Wc1, vm, vm,Wc1,Wc1,Wb1,Wb1")
(match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK")
(match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i")
(match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i")
(match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_duplicate:V_VLSF_ZVFHMIN
(match_operand:<VEL> 3 "direct_broadcast_operand" " f, f,Wdm,Wdm,Wdm,Wdm, f, f"))
(match_operand:V_VLSF_ZVFHMIN 2 "vector_merge_operand" "vu, 0, vu, 0, vu, 0, vu, 0")))]
"TARGET_VECTOR"
"@
vfmv.v.f\t%0,%3
vfmv.v.f\t%0,%3
vlse.v\t%0,%3,zero,%1.t
vlse.v\t%0,%3,zero,%1.t
vlse.v\t%0,%3,zero
vlse.v\t%0,%3,zero
vfmv.s.f\t%0,%3
vfmv.s.f\t%0,%3"
[(set_attr "type" "vfmov,vfmov,vlds,vlds,vlds,vlds,vfmovfv,vfmovfv")
(set_attr "mode" "")])
(define_insn "*pred_broadcast_extended_scalar"
[(set (match_operand:V_VLSI_D 0 "register_operand" "=vr, vr, vr, vr")
(if_then_else:V_VLSI_D
(unspec:<VM>
[(match_operand:<VM> 1 "vector_broadcast_mask_operand" "Wc1,Wc1,Wb1,Wb1")
(match_operand 4 "vector_length_operand" " rK, rK, rK, rK")
(match_operand 5 "const_int_operand" " i, i, i, i")
(match_operand 6 "const_int_operand" " i, i, i, i")
(match_operand 7 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(vec_duplicate:V_VLSI_D
(sign_extend:<VEL>
(match_operand:<VSUBEL> 3 "register_operand" " r, r, r, r")))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
"TARGET_VECTOR"
"@
vmv.v.x\t%0,%3
vmv.v.x\t%0,%3
vmv.s.x\t%0,%3
vmv.s.x\t%0,%3"
[(set_attr "type" "vimov,vimov,vimovxv,vimovxv")
(set_attr "mode" "")])
(define_insn "*pred_broadcast_zero"
[(set (match_operand:V_VLS 0 "register_operand" "=vr, vr")
(if_then_else:V_VLS
(unspec:<VM>
[(match_operand:<VM> 1 "vector_least_significant_set_mask_operand" "Wb1, Wb1")
(match_operand 4 "vector_length_operand" " rK, rK")
(match_operand 5 "const_int_operand" " i, i")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(match_operand:V_VLS 3 "vector_const_0_operand" "Wc0, Wc0")
(match_operand:V_VLS 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vmv.s.x\t%0,zero"
[(set_attr "type" "vimovxv,vimovxv")
(set_attr "mode" "")])
;; Because (vec_duplicate imm) will be converted to (const_vector imm),
;; this pattern is used to handle that case.
(define_insn "*pred_broadcast_imm"
[(set (match_operand:V_VLS 0 "register_operand" "=vr, vr")
(if_then_else:V_VLS
(unspec:<VM>
[(match_operand:<VM> 1 "vector_all_trues_mask_operand" " Wc1, Wc1")
(match_operand 4 "vector_length_operand" " rK, rK")
(match_operand 5 "const_int_operand" " i, i")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(match_operand:V_VLS 3 "vector_const_int_or_double_0_operand" "viWc0, viWc0")
(match_operand:V_VLS 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vmv.v.i\t%0,%v3"
[(set_attr "type" "vimov,vimov")
(set_attr "mode" "")])
;; -------------------------------------------------------------------------------
;; ---- Predicated Strided loads/stores
;; -------------------------------------------------------------------------------
;; Includes:
;; - 7.5. Vector Strided Instructions
;; -------------------------------------------------------------------------------
(define_insn "@pred_strided_load"
[(set (match_operand:V 0 "register_operand" "=vr, vr, vd, vr, vr, vd")
(if_then_else:V
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, Wc1, vm, vmWc1, Wc1, vm")
(match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
(match_operand 6 "const_int_operand" " i, i, i, i, i, i")
(match_operand 7 "const_int_operand" " i, i, i, i, i, i")
(match_operand 8 "const_int_operand" " i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:V
[(match_operand:V 3 "memory_operand" " m, m, m, m, m, m")
(match_operand 4 "" "")] UNSPEC_STRIDED)
(match_operand:V 2 "vector_merge_operand" " 0, vu, vu, 0, vu, vu")))]
"TARGET_VECTOR"
"@
vlse.v\t%0,%3,%z4%p1
vlse.v\t%0,%3,%z4
vlse.v\t%0,%3,%z4,%1.t
vle.v\t%0,%3%p1
vle.v\t%0,%3
vle.v\t%0,%3,%1.t"
[(set_attr "type" "vlds")
(set_attr "mode" "")])
(define_insn "@pred_strided_store"
[(set (match_operand:V 0 "memory_operand" "+m, m")
(if_then_else:V
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, vmWc1")
(match_operand 4 "vector_length_operand" " rK, rK")
(match_operand 5 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:V
[(match_operand 2 "" "")
(match_operand:V 3 "register_operand" " vr, vr")] UNSPEC_STRIDED)
(match_dup 0)))]
"TARGET_VECTOR"
"@
vsse.v\t%3,%0,%z2%p1
vse.v\t%3,%0%p1"
[(set_attr "type" "vsts")
(set_attr "mode" "")
(set (attr "avl_type_idx") (const_int 5))])
;; -------------------------------------------------------------------------------
;; ---- Predicated indexed loads/stores
;; -------------------------------------------------------------------------------
;; Includes:
;; - 7.6. Vector Indexed Instructions
;; -------------------------------------------------------------------------------
;; DEST eew is the same as SOURCE eew, so the DEST register can overlap SOURCE.
(define_insn "@pred_indexed_load_same_eew"
[(set (match_operand:VINDEXED 0 "register_operand" "=vd, vr,vd, vr")
(if_then_else:VINDEXED
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1,vm,Wc1")
(match_operand 5 "vector_length_operand" " rK, rK,rK, rK")
(match_operand 6 "const_int_operand" " i, i, i, i")
(match_operand 7 "const_int_operand" " i, i, i, i")
(match_operand 8 "const_int_operand" " i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VINDEXED
[(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ,rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX> 4 "register_operand" " vr, vr,vr, vr")] ORDER)
(match_operand:VINDEXED 2 "vector_merge_operand" " vu, vu, 0, 0")))]
"TARGET_VECTOR"
"vlxei.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vldx")
(set_attr "mode" "")])
;; DEST eew is greater than SOURCE eew.
(define_insn "@pred_indexed_load_x2_greater_eew"
[(set (match_operand:VEEWEXT2 0 "register_operand" "=&vr, &vr")
(if_then_else:VEEWEXT2
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
(match_operand 5 "vector_length_operand" " rK, rK")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(match_operand 8 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT2
[(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
(match_operand:VEEWEXT2 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vlxei.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vldx")
(set_attr "mode" "")])
(define_insn "@pred_indexed_load_x4_greater_eew"
[(set (match_operand:VEEWEXT4 0 "register_operand" "=&vr, &vr")
(if_then_else:VEEWEXT4
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
(match_operand 5 "vector_length_operand" " rK, rK")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(match_operand 8 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT4
[(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_QUAD_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
(match_operand:VEEWEXT4 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vlxei.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vldx")
(set_attr "mode" "")])
(define_insn "@pred_indexed_load_x8_greater_eew"
[(set (match_operand:VEEWEXT8 0 "register_operand" "=&vr, &vr")
(if_then_else:VEEWEXT8
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
(match_operand 5 "vector_length_operand" " rK, rK")
(match_operand 6 "const_int_operand" " i, i")
(match_operand 7 "const_int_operand" " i, i")
(match_operand 8 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT8
[(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
(mem:BLK (scratch))
(match_operand:<VINDEX_OCT_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
(match_operand:VEEWEXT8 2 "vector_merge_operand" " vu, 0")))]
"TARGET_VECTOR"
"vlxei.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vld