; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+v -verify-machineinstrs \
; RUN:   -M no-aliases < %s | FileCheck %s

target triple = "riscv64-unknown-unknown-elf"

; Tuple of five LMUL=1 registers (v8-v12): each field is spilled vlenb bytes
; apart with vs1r.v and reloaded with vl1re8.v after the inline asm that
; clobbers every vector register.
define target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @load_store_m1x5(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %tuple) {
; CHECK-LABEL: load_store_m1x5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    vs1r.v v9, (a2)
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vs1r.v v10, (a3)
; CHECK-NEXT:    add a4, a3, a1
; CHECK-NEXT:    vs1r.v v11, (a4)
; CHECK-NEXT:    add a1, a4, a1
; CHECK-NEXT:    vs1r.v v12, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl1re8.v v8, (a0)
; CHECK-NEXT:    vl1re8.v v9, (a2)
; CHECK-NEXT:    vl1re8.v v10, (a3)
; CHECK-NEXT:    vl1re8.v v11, (a4)
; CHECK-NEXT:    vl1re8.v v12, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 1
  store target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %tuple, ptr %tuple.addr, align 1
  call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0
}

; Tuple of two LMUL=2 registers (v8, v10): the second field sits vlenb*2
; bytes above the first and is spilled/reloaded with vs2r.v/vl2re8.v.
define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @load_store_m2x2(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %tuple) {
; CHECK-LABEL: load_store_m2x2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vs2r.v v10, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl2re8.v v8, (a0)
; CHECK-NEXT:    vl2re8.v v10, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 1
  store target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %tuple, ptr %tuple.addr, align 1
  call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0
}
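
; Tuple of two LMUL=4 registers (v8, v12): the second field sits vlenb*4
; bytes above the first and is spilled/reloaded with vs4r.v/vl4re8.v.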
define target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @load_store_m4x2(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %tuple) {
; CHECK-LABEL: load_store_m4x2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vs4r.v v12, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl4re8.v v8, (a0)
; CHECK-NEXT:    vl4re8.v v12, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 1
  store target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %tuple, ptr %tuple.addr, align 1
  call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0
}