; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -aarch64-sve-vector-bits-min=512 -aarch64-sve-vector-bits-max=512 < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
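
; With the SVE vector length pinned to exactly 512 bits (min == max), each
; 512-bit fixed-length vector below fills a whole Z register, so the loads and
; stores lower to unpredicated ldr/str and no ptrue is emitted for operations
; that have an unpredicated form (add, fadd).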

define void @add_v64i8(ptr %a, ptr %b) #0 {
; CHECK-LABEL: add_v64i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x1]
; CHECK-NEXT:    add z0.b, z0.b, z1.b
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <64 x i8>, ptr %a
  %op2 = load <64 x i8>, ptr %b
  %res = add <64 x i8> %op1, %op2
  store <64 x i8> %res, ptr %a
  ret void
}

define void @add_v32i16(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-LABEL: add_v32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x1]
; CHECK-NEXT:    add z0.h, z0.h, z1.h
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i16>, ptr %a
  %op2 = load <32 x i16>, ptr %b
  %res = add <32 x i16> %op1, %op2
  store <32 x i16> %res, ptr %a
  ret void
}
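
; SVE ABS only has a predicated form, so a ptrue is still expected here, but
; only to drive the abs itself; the surrounding load and store stay
; unpredicated.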

define void @abs_v16i32(ptr %a) #0 {
; CHECK-LABEL: abs_v16i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    abs z0.s, p0/m, z0.s
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x i32>, ptr %a
  %res = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %op1, i1 false)
  store <16 x i32> %res, ptr %a
  ret void
}

define void @abs_v8i64(ptr %a) #0 {
; CHECK-LABEL: abs_v8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    abs z0.d, p0/m, z0.d
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <8 x i64>, ptr %a
  %res = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %op1, i1 false)
  store <8 x i64> %res, ptr %a
  ret void
}
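
; Floating-point adds likewise map to the unpredicated fadd form, so no ptrue
; appears in these functions.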

define void @fadd_v32f16(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v32f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x1]
; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x half>, ptr %a
  %op2 = load <32 x half>, ptr %b
  %res = fadd <32 x half> %op1, %op2
  store <32 x half> %res, ptr %a
  ret void
}

define void @fadd_v16f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v16f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x1]
; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x float>, ptr %a
  %op2 = load <16 x float>, ptr %b
  %res = fadd <16 x float> %op1, %op2
  store <16 x float> %res, ptr %a
  ret void
}

define void @fadd_v8f64(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v8f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x1]
; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
; CHECK-NEXT:    str z0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <8 x double>, ptr %a
  %op2 = load <8 x double>, ptr %b
  %res = fadd <8 x double> %op1, %op2
  store <8 x double> %res, ptr %a
  ret void
}

declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)

attributes #0 = { "target-features"="+sve" }