author     Fraser Cormack <fraser@codeplay.com>   2021-03-24 14:50:21 +0000
committer  Fraser Cormack <fraser@codeplay.com>   2021-03-25 10:41:40 +0000
commit     1e56e8717f09cc287d2c1329d4009ae38acfa54c
tree       d0fd20f8da6f7931b384a9e6ba9a19e5ecec134e
parent     321a71a77268c314c769a98d62c14609aff306e0
[RISCV] Pre-commit shuffle test cases for D99270
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll  | 300
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll | 170
2 files changed, 470 insertions, 0 deletions
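
The CHECK lines in both new files are autogenerated, as the NOTE headers state. A minimal sketch of regenerating them with the script named in those headers (the invocation below is illustrative and not part of this commit; it assumes the command is run from an llvm-project checkout with a freshly built llc reachable on PATH):

    # Sketch only: regenerate the autogenerated CHECK lines for the two new tests.
    # Assumes an llvm-project root as the working directory and llc on PATH.
    llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll \
        llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll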
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
new file mode 100644
index 0000000..7c26997
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -0,0 +1,300 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
+; CHECK-LABEL: shuffle_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT: vslidedown.vi v25, v8, 3
+; CHECK-NEXT: vfmv.f.s ft0, v25
+; CHECK-NEXT: vsetivli a0, 2, e16,m1,ta,mu
+; CHECK-NEXT: vfmv.v.f v25, ft0
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT: vslidedown.vi v26, v9, 2
+; CHECK-NEXT: vfmv.f.s ft0, v26
+; CHECK-NEXT: vsetivli a0, 2, e16,m1,ta,mu
+; CHECK-NEXT: vfmv.s.f v25, ft0
+; CHECK-NEXT: addi a0, sp, 12
+; CHECK-NEXT: vse16.v v25, (a0)
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT: vslidedown.vi v25, v8, 1
+; CHECK-NEXT: vfmv.f.s ft0, v25
+; CHECK-NEXT: vsetivli a0, 2, e16,m1,ta,mu
+; CHECK-NEXT: vfmv.v.f v25, ft0
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: vfmv.s.f v25, ft0
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vse16.v v25, (a0)
+; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+  ret <4 x half> %s
+}
+
+define <8 x float> @shuffle_v8f32(<8 x float> %x, <8 x float> %y) {
+; RV32-LABEL: shuffle_v8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: addi s0, sp, 64
+; RV32-NEXT: .cfi_def_cfa s0, 0
+; RV32-NEXT: andi sp, sp, -32
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 7
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.v.f v25, ft0
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 6
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.s.f v25, ft0
+; RV32-NEXT: addi a0, sp, 24
+; RV32-NEXT: vse32.v v25, (a0)
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 5
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.v.f v25, ft0
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v10, 4
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.s.f v25, ft0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vse32.v v25, (a0)
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 3
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.v.f v25, ft0
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 2
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.s.f v25, ft0
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vse32.v v25, (a0)
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v10, 1
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.v.f v25, ft0
+; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; RV32-NEXT: vfmv.f.s ft0, v10
+; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV32-NEXT: vfmv.s.f v25, ft0
+; RV32-NEXT: vse32.v v25, (sp)
+; RV32-NEXT: vsetivli a0, 8, e32,m2,ta,mu
+; RV32-NEXT: vle32.v v8, (sp)
+; RV32-NEXT: addi sp, s0, -64
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: shuffle_v8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: addi s0, sp, 64
+; RV64-NEXT: .cfi_def_cfa s0, 0
+; RV64-NEXT: andi sp, sp, -32
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 7
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.v.f v25, ft0
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 6
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.s.f v25, ft0
+; RV64-NEXT: addi a0, sp, 24
+; RV64-NEXT: vse32.v v25, (a0)
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 5
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.v.f v25, ft0
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v10, 4
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.s.f v25, ft0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vse32.v v25, (a0)
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 3
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.v.f v25, ft0
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 2
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.s.f v25, ft0
+; RV64-NEXT: addi a0, sp, 8
+; RV64-NEXT: vse32.v v25, (a0)
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v10, 1
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.v.f v25, ft0
+; RV64-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; RV64-NEXT: vfmv.f.s ft0, v10
+; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
+; RV64-NEXT: vfmv.s.f v25, ft0
+; RV64-NEXT: vse32.v v25, (sp)
+; RV64-NEXT: vsetivli a0, 8, e32,m2,ta,mu
+; RV64-NEXT: vle32.v v8, (sp)
+; RV64-NEXT: addi sp, s0, -64
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+  %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 8, i32 9, i32 2, i32 3, i32 12, i32 5, i32 6, i32 7>
+  ret <8 x float> %s
+}
+
+define <4 x double> @shuffle_fv_v4i16(<4 x double> %x) {
+; RV32-LABEL: shuffle_fv_v4i16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: addi s0, sp, 64
+; RV32-NEXT: .cfi_def_cfa s0, 0
+; RV32-NEXT: andi sp, sp, -32
+; RV32-NEXT: lui a0, %hi(.LCPI2_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI2_0)(a0)
+; RV32-NEXT: fsd ft0, 24(sp)
+; RV32-NEXT: fsd ft0, 0(sp)
+; RV32-NEXT: vsetivli a0, 1, e64,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 2
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: fsd ft0, 16(sp)
+; RV32-NEXT: vslidedown.vi v26, v8, 1
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: fsd ft0, 8(sp)
+; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV32-NEXT: vle64.v v8, (sp)
+; RV32-NEXT: addi sp, s0, -64
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: shuffle_fv_v4i16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: addi s0, sp, 64
+; RV64-NEXT: .cfi_def_cfa s0, 0
+; RV64-NEXT: andi sp, sp, -32
+; RV64-NEXT: lui a0, %hi(.LCPI2_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI2_0)(a0)
+; RV64-NEXT: fsd ft0, 24(sp)
+; RV64-NEXT: fsd ft0, 0(sp)
+; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 2
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: fsd ft0, 16(sp)
+; RV64-NEXT: vslidedown.vi v26, v8, 1
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: fsd ft0, 8(sp)
+; RV64-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV64-NEXT: vle64.v v8, (sp)
+; RV64-NEXT: addi sp, s0, -64
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+  %s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x double> %s
+}
+
+define <4 x double> @shuffle_vf_v4i16(<4 x double> %x) {
+; RV32-LABEL: shuffle_vf_v4i16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: addi s0, sp, 64
+; RV32-NEXT: .cfi_def_cfa s0, 0
+; RV32-NEXT: andi sp, sp, -32
+; RV32-NEXT: lui a0, %hi(.LCPI3_0)
+; RV32-NEXT: fld ft0, %lo(.LCPI3_0)(a0)
+; RV32-NEXT: fsd ft0, 16(sp)
+; RV32-NEXT: fsd ft0, 8(sp)
+; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu
+; RV32-NEXT: vfmv.f.s ft0, v8
+; RV32-NEXT: fsd ft0, 0(sp)
+; RV32-NEXT: vsetivli a0, 1, e64,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 3
+; RV32-NEXT: vfmv.f.s ft0, v26
+; RV32-NEXT: fsd ft0, 24(sp)
+; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV32-NEXT: vle64.v v8, (sp)
+; RV32-NEXT: addi sp, s0, -64
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: shuffle_vf_v4i16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: addi s0, sp, 64
+; RV64-NEXT: .cfi_def_cfa s0, 0
+; RV64-NEXT: andi sp, sp, -32
+; RV64-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64-NEXT: fld ft0, %lo(.LCPI3_0)(a0)
+; RV64-NEXT: fsd ft0, 16(sp)
+; RV64-NEXT: fsd ft0, 8(sp)
+; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
+; RV64-NEXT: vfmv.f.s ft0, v8
+; RV64-NEXT: fsd ft0, 0(sp)
+; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 3
+; RV64-NEXT: vfmv.f.s ft0, v26
+; RV64-NEXT: fsd ft0, 24(sp)
+; RV64-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV64-NEXT: vle64.v v8, (sp)
+; RV64-NEXT: addi sp, s0, -64
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+  %s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x double> %s
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
new file mode 100644
index 0000000..a983515
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) {
+; CHECK-LABEL: shuffle_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: sh a0, 8(sp)
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT: vslidedown.vi v25, v8, 3
+; CHECK-NEXT: vmv.x.s a0, v25
+; CHECK-NEXT: sh a0, 14(sp)
+; CHECK-NEXT: vslidedown.vi v25, v9, 2
+; CHECK-NEXT: vmv.x.s a0, v25
+; CHECK-NEXT: sh a0, 12(sp)
+; CHECK-NEXT: vslidedown.vi v25, v8, 1
+; CHECK-NEXT: vmv.x.s a0, v25
+; CHECK-NEXT: sh a0, 10(sp)
+; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+  ret <4 x i16> %s
+}
+
+define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
+; RV32-LABEL: shuffle_v8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: addi s0, sp, 64
+; RV32-NEXT: .cfi_def_cfa s0, 0
+; RV32-NEXT: andi sp, sp, -32
+; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: sw a0, 0(sp)
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV32-NEXT: vslidedown.vi v26, v8, 7
+; RV32-NEXT: vmv.x.s a0, v26
+; RV32-NEXT: sw a0, 28(sp)
+; RV32-NEXT: vslidedown.vi v26, v8, 6
+; RV32-NEXT: vmv.x.s a0, v26
+; RV32-NEXT: sw a0, 24(sp)
+; RV32-NEXT: vslidedown.vi v26, v10, 5
+; RV32-NEXT: vmv.x.s a0, v26
+; RV32-NEXT: sw a0, 20(sp)
+; RV32-NEXT: vslidedown.vi v26, v10, 4
+; RV32-NEXT: vmv.x.s a0, v26
+; RV32-NEXT: sw a0, 16(sp)
+; RV32-NEXT: vslidedown.vi v26, v8, 3
+; RV32-NEXT: vmv.x.s a0, v26
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: vslidedown.vi v26, v10, 2
+; RV32-NEXT: vmv.x.s a0, v26
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vslidedown.vi v26, v8, 1
+; RV32-NEXT: vmv.x.s a0, v26
+; RV32-NEXT: sw a0, 4(sp)
+; RV32-NEXT: vsetivli a0, 8, e32,m2,ta,mu
+; RV32-NEXT: vle32.v v8, (sp)
+; RV32-NEXT: addi sp, s0, -64
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: shuffle_v8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: addi s0, sp, 64
+; RV64-NEXT: .cfi_def_cfa s0, 0
+; RV64-NEXT: andi sp, sp, -32
+; RV64-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: sw a0, 0(sp)
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
+; RV64-NEXT: vslidedown.vi v26, v8, 7
+; RV64-NEXT: vmv.x.s a0, v26
+; RV64-NEXT: sw a0, 28(sp)
+; RV64-NEXT: vslidedown.vi v26, v8, 6
+; RV64-NEXT: vmv.x.s a0, v26
+; RV64-NEXT: sw a0, 24(sp)
+; RV64-NEXT: vslidedown.vi v26, v10, 5
+; RV64-NEXT: vmv.x.s a0, v26
+; RV64-NEXT: sw a0, 20(sp)
+; RV64-NEXT: vslidedown.vi v26, v10, 4
+; RV64-NEXT: vmv.x.s a0, v26
+; RV64-NEXT: sw a0, 16(sp)
+; RV64-NEXT: vslidedown.vi v26, v8, 3
+; RV64-NEXT: vmv.x.s a0, v26
+; RV64-NEXT: sw a0, 12(sp)
+; RV64-NEXT: vslidedown.vi v26, v10, 2
+; RV64-NEXT: vmv.x.s a0, v26
+; RV64-NEXT: sw a0, 8(sp)
+; RV64-NEXT: vslidedown.vi v26, v8, 1
+; RV64-NEXT: vmv.x.s a0, v26
+; RV64-NEXT: sw a0, 4(sp)
+; RV64-NEXT: vsetivli a0, 8, e32,m2,ta,mu
+; RV64-NEXT: vle32.v v8, (sp)
+; RV64-NEXT: addi sp, s0, -64
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+  %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 6, i32 7>
+  ret <8 x i32> %s
+}
+
+define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: shuffle_xv_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: addi a0, zero, 5
+; CHECK-NEXT: sh a0, 14(sp)
+; CHECK-NEXT: sh a0, 8(sp)
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT: vslidedown.vi v25, v8, 2
+; CHECK-NEXT: vmv.x.s a0, v25
+; CHECK-NEXT: sh a0, 12(sp)
+; CHECK-NEXT: vslidedown.vi v25, v8, 1
+; CHECK-NEXT: vmv.x.s a0, v25
+; CHECK-NEXT: sh a0, 10(sp)
+; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %s = shufflevector <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i16> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x i16> %s
+}
+
+define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: shuffle_vx_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: addi a0, zero, 5
+; CHECK-NEXT: sh a0, 12(sp)
+; CHECK-NEXT: sh a0, 10(sp)
+; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: sh a0, 8(sp)
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
+; CHECK-NEXT: vslidedown.vi v25, v8, 3
+; CHECK-NEXT: vmv.x.s a0, v25
+; CHECK-NEXT: sh a0, 14(sp)
+; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+  %s = shufflevector <4 x i16> %x, <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x i16> %s
+}
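
As a usage note (a sketch under assumptions, not part of the commit): the two new tests can be run individually through lit. The build/ path and the llvm-lit -v invocation below are assumptions about a locally configured build with the RISC-V target enabled.

    # Assumed local paths; adjust build/ to the actual build directory.
    build/bin/llvm-lit -v \
        llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll \
        llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll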