; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; These tests ensure we don't hit a selection error caused by
; performPostLD1Combine, which should bail out whenever the result
; type is not a 128-bit or 64-bit vector.
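;
; For reference, with an ordinary 128-bit NEON vector the combine does
; fire and folds the pointer increment into a post-indexed load. An
; illustrative sketch (not part of the checked tests; the function name
; and exact codegen below are assumptions):
;
;   define <4 x i32> @post_ld1_neon(<4 x i32> %v, ptr %a, ptr %ptr, i64 %inc) {
;     %load = load i32, ptr %a
;     %ins = insertelement <4 x i32> %v, i32 %load, i32 1
;     %gep = getelementptr i32, ptr %a, i64 %inc
;     store ptr %gep, ptr %ptr
;     ret <4 x i32> %ins
;   }
;
; which can select to something like:
;
;   lsl x8, x2, #2
;   ld1 { v0.s }[1], [x0], x8
;   str x0, [x1]
;   ret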

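; Scalar load + insertelement into a scalable vector: the combine must
; bail, so we expect a plain LDR and a separate address add.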
define <vscale x 4 x i32> @test_post_ld1_insert(ptr %a, ptr %ptr, i64 %inc) {
; CHECK-LABEL: test_post_ld1_insert:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr s0, [x0]
; CHECK-NEXT:    add x8, x0, x2, lsl #2
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %load = load i32, ptr %a
  %ins = insertelement <vscale x 4 x i32> undef, i32 %load, i32 0
  %gep = getelementptr i32, ptr %a, i64 %inc
  store ptr %gep, ptr %ptr
  ret <vscale x 4 x i32> %ins
}

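; Splat of a loaded scalar via llvm.aarch64.sve.dup.x: the load selects
; to LD1RD and the pointer increment again stays a separate ADD.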
define <vscale x 2 x double> @test_post_ld1_dup(ptr %a, ptr %ptr, i64 %inc) {
; CHECK-LABEL: test_post_ld1_dup:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    add x8, x0, x2, lsl #3
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    str x8, [x1]
; CHECK-NEXT:    ret
  %load = load double, ptr %a
  %dup = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %load)
  %gep = getelementptr double, ptr %a, i64 %inc
  store ptr %gep, ptr %ptr
  ret <vscale x 2 x double> %dup
}

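; With vscale_range(2,2) the fixed-length <4 x i64> operations below are
; lowered onto SVE registers, so the combine must bail here too even
; though the IR types are fixed-length.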
define void @test_post_ld1_int_fixed(ptr %data, i64 %idx, ptr %addr, ptr %res_ptr)  #1 {
; CHECK-LABEL: test_post_ld1_int_fixed:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #2 // =0x2
; CHECK-NEXT:    index z0.d, #0, #1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    ldr z2, [x2]
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ptrue p1.d, vl1
; CHECK-NEXT:    ldr x9, [x0, x1, lsl #3]
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    mov z0.d, p1/m, x8
; CHECK-NEXT:    mov z2.d, p0/m, x9
; CHECK-NEXT:    add z0.d, z0.d, z2.d
; CHECK-NEXT:    str z0, [x3]
; CHECK-NEXT:    ret
  %A = load <4 x i64>, ptr %addr
  %ld1 = load i64, ptr %data
  %vec1 = insertelement <4 x i64> %A, i64 %ld1, i32 0
  %gep = getelementptr i64, ptr %data, i64 %idx
  %ld2 = load i64, ptr %gep
  %vec2 = insertelement <4 x i64> %A, i64 %ld2, i32 2
  %res = add <4 x i64> %vec1, %vec2
  store <4 x i64> %res, ptr %res_ptr
  ret void
}

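; Floating-point variant of the fixed-length test above.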
define void @test_post_ld1_double_fixed(ptr %data, i64 %idx, ptr %addr, ptr %res_ptr)  #1 {
; CHECK-LABEL: test_post_ld1_double_fixed:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #2 // =0x2
; CHECK-NEXT:    index z0.d, #0, #1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    ptrue p1.d, vl1
; CHECK-NEXT:    ldr d2, [x0, x1, lsl #3]
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ldr z0, [x2]
; CHECK-NEXT:    ldr d1, [x0]
; CHECK-NEXT:    sel z1.d, p1, z1.d, z0.d
; CHECK-NEXT:    mov z0.d, p0/m, d2
; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
; CHECK-NEXT:    str z0, [x3]
; CHECK-NEXT:    ret
  %A = load <4 x double>, ptr %addr
  %ld1 = load double, ptr %data
  %vec1 = insertelement <4 x double> %A, double %ld1, i32 0
  %gep = getelementptr double, ptr %data, i64 %idx
  %ld2 = load double, ptr %gep
  %vec2 = insertelement <4 x double> %A, double %ld2, i32 2
  %res = fadd <4 x double> %vec1, %vec2
  store <4 x double> %res, ptr %res_ptr
  ret void
}
attributes #1 = { vscale_range(2,2) "target-features"="+neon,+sve" }

declare <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double)