; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sme -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming -verify-machineinstrs < %s | FileCheck %s
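; The tests below check that the @llvm.aarch64.sve.psel.* intrinsics are
; lowered to a single PSEL instruction. PSEL takes its vector select register
; from W12-W15, which is why each test first copies the index from w0 into
; w12. The *_imm variants additionally check that a constant added to the
; index is folded into the instruction's immediate operand, using the largest
; immediate encodable for each element size.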

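; Byte (.b) elements: immediate offsets 0-15.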
define <vscale x 16 x i1> @psel_b(<vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.b[w12, 0]
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv16i1(<vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2, i32 %idx)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @psel_b_imm(<vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_b_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.b[w12, 15]
; CHECK-NEXT:    ret
  %add = add i32 %idx, 15
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv16i1(<vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2, i32 %add)
  ret <vscale x 16 x i1> %res
}

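; Halfword (.h) elements: immediate offsets 0-7.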
define <vscale x 16 x i1> @psel_h(<vscale x 16 x i1> %p1, <vscale x 8 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.h[w12, 0]
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv8i1(<vscale x 16 x i1> %p1, <vscale x 8 x i1> %p2, i32 %idx)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @psel_h_imm(<vscale x 16 x i1> %p1, <vscale x 8 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_h_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.h[w12, 7]
; CHECK-NEXT:    ret
  %add = add i32 %idx, 7
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv8i1(<vscale x 16 x i1> %p1, <vscale x 8 x i1> %p2, i32 %add)
  ret <vscale x 16 x i1> %res
}

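; Word (.s) elements: immediate offsets 0-3.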
define <vscale x 16 x i1> @psel_s(<vscale x 16 x i1> %p1, <vscale x 4 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.s[w12, 0]
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv4i1(<vscale x 16 x i1> %p1, <vscale x 4 x i1> %p2, i32 %idx)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @psel_s_imm(<vscale x 16 x i1> %p1, <vscale x 4 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_s_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.s[w12, 3]
; CHECK-NEXT:    ret
  %add = add i32 %idx, 3
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv4i1(<vscale x 16 x i1> %p1, <vscale x 4 x i1> %p2, i32 %add)
  ret <vscale x 16 x i1> %res
}

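; Doubleword (.d) elements: immediate offsets 0-1.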
define <vscale x 16 x i1> @psel_d(<vscale x 16 x i1> %p1, <vscale x 2 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.d[w12, 0]
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv2i1(<vscale x 16 x i1> %p1, <vscale x 2 x i1> %p2, i32 %idx)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @psel_d_imm(<vscale x 16 x i1> %p1, <vscale x 2 x i1> %p2, i32 %idx) {
; CHECK-LABEL: psel_d_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w12, w0
; CHECK-NEXT:    psel p0, p0, p1.d[w12, 1]
; CHECK-NEXT:    ret
  %add = add i32 %idx, 1
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv2i1(<vscale x 16 x i1> %p1, <vscale x 2 x i1> %p2, i32 %add)
  ret <vscale x 16 x i1> %res
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.psel.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i1>  @llvm.aarch64.sve.psel.nxv8i1(<vscale x 16 x i1>, <vscale x 8 x i1>, i32)
declare <vscale x 16 x i1>  @llvm.aarch64.sve.psel.nxv4i1(<vscale x 16 x i1>, <vscale x 4 x i1>, i32)
declare <vscale x 16 x i1>  @llvm.aarch64.sve.psel.nxv2i1(<vscale x 16 x i1>, <vscale x 2 x i1>, i32)