aboutsummaryrefslogtreecommitdiff
path: root/llvm/test/CodeGen/AArch64/machine-combiner-subadd.ll
blob: 6bee9f3f65662cea7cd6ba6a63995b0a1eedd85d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s

; The test cases in this file check the following transformation, which is
; applied only when the right-hand form reduces latency:
;     A - (B + C)  ==>   (A - B) - C

; 32 bit version.
define i32 @test1(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: test1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add w8, w0, #100
; CHECK-NEXT:    orr w9, w2, #0x80
; CHECK-NEXT:    eor w10, w1, w8, lsl #8
; CHECK-NEXT:    sub w8, w9, w8
; CHECK-NEXT:    sub w8, w8, w10
; CHECK-NEXT:    eor w0, w8, w10, asr #13
; CHECK-NEXT:    ret
entry:
  ; %xor sits at the end of the longest dependency chain (a1 -> shl -> xor),
  ; so %sub = %c1 - (%xor + %a1) should be reassociated to
  ; (%c1 - %a1) - %xor: the two back-to-back SUBs in the CHECK lines above.
  ; This lets the first SUB start before %xor is ready, reducing latency.
  %c1  = or  i32 %c, 128
  %a1  = add i32 %a, 100
  %shl = shl i32 %a1, 8
  %xor = xor i32 %shl, %b
  %add = add i32 %xor, %a1
  %sub = sub i32 %c1, %add
  ; %shr keeps %xor live past %sub so the combiner cannot simply fold it away.
  %shr = ashr i32 %xor, 13
  %xor2 = xor i32 %sub, %shr
  ret i32 %xor2
}

; 64 bit version.
define i64 @test2(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: test2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add x8, x0, #100
; CHECK-NEXT:    orr x9, x2, #0x80
; CHECK-NEXT:    eor x10, x1, x8, lsl #8
; CHECK-NEXT:    sub x8, x9, x8
; CHECK-NEXT:    sub x8, x8, x10
; CHECK-NEXT:    eor x0, x8, x10, asr #13
; CHECK-NEXT:    ret
entry:
  ; Same dataflow as @test1 but in i64, checking the transform on the
  ; 64-bit SUB: %c1 - (%xor + %a1) becomes (%c1 - %a1) - %xor (the two
  ; SUBs on x-registers in the CHECK lines above).
  %c1  = or  i64 %c, 128
  %a1  = add i64 %a, 100
  %shl = shl i64 %a1, 8
  %xor = xor i64 %shl, %b
  %add = add i64 %xor, %a1
  %sub = sub i64 %c1, %add
  ; Extra use of %xor, mirroring @test1, so it stays live past the SUB.
  %shr = ashr i64 %xor, 13
  %xor2 = xor i64 %sub, %shr
  ret i64 %xor2
}

; Negative test. The right form can't reduce latency.
define i32 @test3(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: test3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add w8, w0, #100
; CHECK-NEXT:    orr w9, w2, #0x80
; CHECK-NEXT:    eor w10, w1, w8, lsl #8
; CHECK-NEXT:    add w8, w9, w8
; CHECK-NEXT:    sub w8, w10, w8
; CHECK-NEXT:    eor w0, w8, w10, asr #13
; CHECK-NEXT:    ret
entry:
  ; Negative test: here the SUB is %xor - (%c1 + %a1).  The minuend %xor is
  ; itself the late value on the critical path, so reassociating would not
  ; shorten it; the CHECK lines above verify the original ADD + single SUB
  ; shape is kept.
  %c1  = or  i32 %c, 128
  %a1  = add i32 %a, 100
  %shl = shl i32 %a1, 8
  %xor = xor i32 %shl, %b
  %add = add i32 %c1, %a1
  %sub = sub i32 %xor, %add
  %shr = ashr i32 %xor, 13
  %xor2 = xor i32 %sub, %shr
  ret i32 %xor2
}