aboutsummaryrefslogtreecommitdiff
path: root/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
blob: d65e87d5b97567f02e2bd1993eece35c432dbdca (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+sve -force-streaming-compatible  < %s | FileCheck %s
; RUN: llc -mattr=+sme -force-streaming-compatible  < %s | FileCheck %s
; RUN: llc -force-streaming-compatible < %s | FileCheck %s --check-prefix=NONEON-NOSVE

target triple = "aarch64"

;
; NOTE: SVE lowering for the BSP pseudoinst is not currently implemented, so we
;       don't currently expect the code below to lower to BSL/BIT/BIF. Once
;       this is implemented, this test will be fleshed out.
;

; Open-coded bitwise select over two <8 x i32> vectors:
;   result = (left & -cond) | (right & (cond - 1))
; Assuming each lane of %pre_cond is 0 or 1 (TODO confirm against callers),
; `0 - cond` yields an all-ones mask where cond==1, and `cond + (-1)` yields
; an all-ones mask where cond==0, so the and/or pair selects %left lanes where
; cond is set and %right lanes otherwise. This is the pattern that should
; eventually lower to a single BSL/BIT/BIF once BSP lowering lands (see NOTE
; above).
define <8 x i32> @fixed_bitselect_v8i32(ptr %pre_cond_ptr, ptr %left_ptr, ptr %right_ptr) {
; CHECK-LABEL: fixed_bitselect_v8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.s, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    ldp q2, q1, [x0]
; CHECK-NEXT:    ldp q5, q4, [x1]
; CHECK-NEXT:    ldp q6, q7, [x2]
; CHECK-NEXT:    add z3.s, z1.s, z0.s
; CHECK-NEXT:    subr z1.s, z1.s, #0 // =0x0
; CHECK-NEXT:    add z0.s, z2.s, z0.s
; CHECK-NEXT:    subr z2.s, z2.s, #0 // =0x0
; CHECK-NEXT:    and z1.d, z1.d, z4.d
; CHECK-NEXT:    and z3.d, z3.d, z7.d
; CHECK-NEXT:    and z0.d, z0.d, z6.d
; CHECK-NEXT:    and z2.d, z2.d, z5.d
; CHECK-NEXT:    orr z1.d, z3.d, z1.d
; CHECK-NEXT:    orr z0.d, z0.d, z2.d
; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
;
; NONEON-NOSVE-LABEL: fixed_bitselect_v8i32:
; NONEON-NOSVE:       // %bb.0:
; NONEON-NOSVE-NEXT:    ldp q0, q1, [x0]
; NONEON-NOSVE-NEXT:    ldp q2, q3, [x1]
; NONEON-NOSVE-NEXT:    ldp q4, q5, [x2]
; NONEON-NOSVE-NEXT:    stp q0, q2, [sp, #-128]!
; NONEON-NOSVE-NEXT:    .cfi_def_cfa_offset 128
; NONEON-NOSVE-NEXT:    stp q1, q3, [sp, #48]
; NONEON-NOSVE-NEXT:    ldp w8, w14, [sp, #48]
; NONEON-NOSVE-NEXT:    ldp w9, w4, [sp, #64]
; NONEON-NOSVE-NEXT:    ldp w13, w11, [sp, #56]
; NONEON-NOSVE-NEXT:    neg w3, w8
; NONEON-NOSVE-NEXT:    neg w15, w14
; NONEON-NOSVE-NEXT:    str q4, [sp, #32]
; NONEON-NOSVE-NEXT:    and w9, w3, w9
; NONEON-NOSVE-NEXT:    and w15, w15, w4
; NONEON-NOSVE-NEXT:    str q5, [sp, #80]
; NONEON-NOSVE-NEXT:    ldp w5, w3, [sp, #72]
; NONEON-NOSVE-NEXT:    ldp w16, w12, [sp]
; NONEON-NOSVE-NEXT:    neg w4, w11
; NONEON-NOSVE-NEXT:    neg w2, w13
; NONEON-NOSVE-NEXT:    sub w11, w11, #1
; NONEON-NOSVE-NEXT:    and w3, w4, w3
; NONEON-NOSVE-NEXT:    and w2, w2, w5
; NONEON-NOSVE-NEXT:    sub w13, w13, #1
; NONEON-NOSVE-NEXT:    ldp w6, w4, [sp, #16]
; NONEON-NOSVE-NEXT:    ldp w10, w17, [sp, #8]
; NONEON-NOSVE-NEXT:    neg w1, w16
; NONEON-NOSVE-NEXT:    neg w0, w12
; NONEON-NOSVE-NEXT:    sub w16, w16, #1
; NONEON-NOSVE-NEXT:    and w1, w1, w6
; NONEON-NOSVE-NEXT:    and w0, w0, w4
; NONEON-NOSVE-NEXT:    sub w12, w12, #1
; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #24]
; NONEON-NOSVE-NEXT:    neg w18, w17
; NONEON-NOSVE-NEXT:    neg w4, w10
; NONEON-NOSVE-NEXT:    sub w17, w17, #1
; NONEON-NOSVE-NEXT:    sub w10, w10, #1
; NONEON-NOSVE-NEXT:    sub w14, w14, #1
; NONEON-NOSVE-NEXT:    sub w8, w8, #1
; NONEON-NOSVE-NEXT:    and w4, w4, w5
; NONEON-NOSVE-NEXT:    and w18, w18, w6
; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #32]
; NONEON-NOSVE-NEXT:    and w16, w16, w5
; NONEON-NOSVE-NEXT:    and w12, w12, w6
; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #40]
; NONEON-NOSVE-NEXT:    and w10, w10, w5
; NONEON-NOSVE-NEXT:    and w17, w17, w6
; NONEON-NOSVE-NEXT:    orr w17, w17, w18
; NONEON-NOSVE-NEXT:    orr w10, w10, w4
; NONEON-NOSVE-NEXT:    ldp w18, w4, [sp, #88]
; NONEON-NOSVE-NEXT:    ldp w5, w6, [sp, #80]
; NONEON-NOSVE-NEXT:    stp w10, w17, [sp, #104]
; NONEON-NOSVE-NEXT:    orr w10, w12, w0
; NONEON-NOSVE-NEXT:    orr w12, w16, w1
; NONEON-NOSVE-NEXT:    and w11, w11, w4
; NONEON-NOSVE-NEXT:    stp w12, w10, [sp, #96]
; NONEON-NOSVE-NEXT:    and w10, w13, w18
; NONEON-NOSVE-NEXT:    orr w11, w11, w3
; NONEON-NOSVE-NEXT:    and w12, w14, w6
; NONEON-NOSVE-NEXT:    orr w10, w10, w2
; NONEON-NOSVE-NEXT:    and w8, w8, w5
; NONEON-NOSVE-NEXT:    stp w10, w11, [sp, #120]
; NONEON-NOSVE-NEXT:    orr w10, w12, w15
; NONEON-NOSVE-NEXT:    orr w8, w8, w9
; NONEON-NOSVE-NEXT:    stp w8, w10, [sp, #112]
; NONEON-NOSVE-NEXT:    ldp q0, q1, [sp, #96]
; NONEON-NOSVE-NEXT:    add sp, sp, #128
; NONEON-NOSVE-NEXT:    ret
  %pre_cond = load <8 x i32>, ptr %pre_cond_ptr
  %left = load <8 x i32>, ptr %left_ptr
  %right = load <8 x i32>, ptr %right_ptr

  ; %neg_cond = 0 - cond: all-ones per lane where cond==1, zero where cond==0.
  %neg_cond = sub <8 x i32> zeroinitializer, %pre_cond
  ; %min_cond = cond - 1: all-ones per lane where cond==0, zero where cond==1.
  %min_cond = add <8 x i32> %pre_cond, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  ; Mask each operand by its select condition, then merge the disjoint lanes.
  %left_bits_0 = and <8 x i32> %neg_cond, %left
  %right_bits_0 = and <8 x i32> %min_cond, %right
  %bsl0000 = or <8 x i32> %right_bits_0, %left_bits_0
  ret <8 x i32> %bsl0000
}