; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
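
; The clrsb idiom -- ctlz(((x ^ (x >> bitwidth-1)) << 1) | 1) -- counts the
; leading redundant sign bits of x and should be selected to a single cls
; instruction by both SelectionDAG (CHECK-SD) and GlobalISel (CHECK-GI).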
declare i32 @llvm.ctlz.i32(i32, i1) #0
declare i64 @llvm.ctlz.i64(i64, i1) #1
define i32 @clrsb32(i32 %x) #2 {
; CHECK-LABEL: clrsb32:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: cls w0, w0
; CHECK-NEXT: ret
entry:
%shr = ashr i32 %x, 31
%xor = xor i32 %shr, %x
%mul = shl i32 %xor, 1
%add = or i32 %mul, 1
%0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 false)
ret i32 %0
}
define i64 @clrsb64(i64 %x) #3 {
; CHECK-LABEL: clrsb64:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: cls x0, x0
; CHECK-NEXT: ret
entry:
%shr = ashr i64 %x, 63
%xor = xor i64 %shr, %x
%mul = shl nsw i64 %xor, 1
%add = or i64 %mul, 1
%0 = tail call i64 @llvm.ctlz.i64(i64 %add, i1 false)
ret i64 %0
}
define i32 @clrsb32_zeroundef(i32 %x) #2 {
; CHECK-LABEL: clrsb32_zeroundef:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: cls w0, w0
; CHECK-NEXT: ret
entry:
%shr = ashr i32 %x, 31
%xor = xor i32 %shr, %x
%mul = shl i32 %xor, 1
%add = or i32 %mul, 1
%0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 true)
ret i32 %0
}
define i64 @clrsb64_zeroundef(i64 %x) #3 {
; CHECK-LABEL: clrsb64_zeroundef:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: cls x0, x0
; CHECK-NEXT: ret
entry:
%shr = ashr i64 %x, 63
%xor = xor i64 %shr, %x
%mul = shl nsw i64 %xor, 1
%add = or i64 %mul, 1
%0 = tail call i64 @llvm.ctlz.i64(i64 %add, i1 true)
ret i64 %0
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-GI: {{.*}}
; CHECK-SD: {{.*}}