Diffstat (limited to 'llvm/test')
146 files changed, 14206 insertions, 7897 deletions
diff --git a/llvm/test/Analysis/DependenceAnalysis/GCD.ll b/llvm/test/Analysis/DependenceAnalysis/GCD.ll index 03343e7..cb14d18 100644 --- a/llvm/test/Analysis/DependenceAnalysis/GCD.ll +++ b/llvm/test/Analysis/DependenceAnalysis/GCD.ll @@ -254,7 +254,7 @@ define void @gcd4(ptr %A, ptr %B, i64 %M, i64 %N) nounwind uwtable ssp { ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4 ; CHECK-NEXT: da analyze - output [* *]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4 -; CHECK-NEXT: da analyze - none! +; CHECK-NEXT: da analyze - flow [* *|<]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %0, ptr %B.addr.11, align 4 ; CHECK-NEXT: da analyze - confused! ; CHECK-NEXT: Src: %0 = load i32, ptr %arrayidx16, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4 @@ -322,7 +322,7 @@ define void @gcd5(ptr %A, ptr %B, i64 %M, i64 %N) nounwind uwtable ssp { ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4 ; CHECK-NEXT: da analyze - output [* *]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4 -; CHECK-NEXT: da analyze - flow [<> *]! +; CHECK-NEXT: da analyze - flow [* *|<]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %0, ptr %B.addr.11, align 4 ; CHECK-NEXT: da analyze - confused! ; CHECK-NEXT: Src: %0 = load i32, ptr %arrayidx16, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4 @@ -390,7 +390,7 @@ define void @gcd6(i64 %n, ptr %A, ptr %B) nounwind uwtable ssp { ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx5, align 4 --> Dst: store i32 %conv, ptr %arrayidx5, align 4 ; CHECK-NEXT: da analyze - output [* *]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx5, align 4 --> Dst: %2 = load i32, ptr %arrayidx9, align 4 -; CHECK-NEXT: da analyze - none! +; CHECK-NEXT: da analyze - flow [* *|<]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx5, align 4 --> Dst: store i32 %2, ptr %B.addr.12, align 4 ; CHECK-NEXT: da analyze - confused! ; CHECK-NEXT: Src: %2 = load i32, ptr %arrayidx9, align 4 --> Dst: %2 = load i32, ptr %arrayidx9, align 4 diff --git a/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll b/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll index cdfaec7..73a415b 100644 --- a/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll +++ b/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll @@ -384,7 +384,7 @@ define void @symbolicsiv6(ptr %A, ptr %B, i64 %n, i64 %N, i64 %M) nounwind uwtab ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4 ; CHECK-NEXT: da analyze - none! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %0 = load i32, ptr %arrayidx7, align 4 -; CHECK-NEXT: da analyze - none! +; CHECK-NEXT: da analyze - flow [*|<]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %0, ptr %B.addr.02, align 4 ; CHECK-NEXT: da analyze - confused! ; CHECK-NEXT: Src: %0 = load i32, ptr %arrayidx7, align 4 --> Dst: %0 = load i32, ptr %arrayidx7, align 4 @@ -440,7 +440,7 @@ define void @symbolicsiv7(ptr %A, ptr %B, i64 %n, i64 %N, i64 %M) nounwind uwtab ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4 ; CHECK-NEXT: da analyze - none! 
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %1 = load i32, ptr %arrayidx6, align 4 -; CHECK-NEXT: da analyze - flow [<>]! +; CHECK-NEXT: da analyze - flow [*|<]! ; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %1, ptr %B.addr.02, align 4 ; CHECK-NEXT: da analyze - confused! ; CHECK-NEXT: Src: %1 = load i32, ptr %arrayidx6, align 4 --> Dst: %1 = load i32, ptr %arrayidx6, align 4 diff --git a/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll b/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll index 64fad37..783150a 100644 --- a/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll +++ b/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll @@ -18,7 +18,7 @@ define void @unknown_sign(ptr %a, i64 %k) { ; CHECK-NEXT: Src: store i8 1, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.0, align 1 ; CHECK-NEXT: da analyze - none! ; CHECK-NEXT: Src: store i8 1, ptr %idx.0, align 1 --> Dst: store i8 2, ptr %idx.1, align 1 -; CHECK-NEXT: da analyze - output [<>]! +; CHECK-NEXT: da analyze - output [*|<]! ; CHECK-NEXT: Src: store i8 2, ptr %idx.1, align 1 --> Dst: store i8 2, ptr %idx.1, align 1 ; CHECK-NEXT: da analyze - none! ; diff --git a/llvm/test/Analysis/DependenceAnalysis/gcd-miv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/gcd-miv-overflow.ll new file mode 100644 index 0000000..9169ac3 --- /dev/null +++ b/llvm/test/Analysis/DependenceAnalysis/gcd-miv-overflow.ll @@ -0,0 +1,63 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL +; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=gcd-miv 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-GCD-MIV + +; offset0 = 4; +; offset1 = 0; +; for (i = 0; i < 100; i++) { +; A[offset0] = 1; +; A[offset1] = 2; +; offset0 += 3*m; +; offset1 += 3; +; } +; +; Dependency exists between the two stores. E.g., consider `m` is +; 12297829382473034411, which is a modular multiplicative inverse of 3 under +; modulo 2^64. Then `offset0` is effectively `i + 4`, so accesses will be as +; follows: +; +; - A[offset0] : A[4], A[5], A[6], ... +; - A[offset1] : A[0], A[3], A[6], ... +; +define void @gcdmiv_coef_ovfl(ptr %A, i64 %m) { +; CHECK-ALL-LABEL: 'gcdmiv_coef_ovfl' +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - output [*|<]! +; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; +; CHECK-GCD-MIV-LABEL: 'gcdmiv_coef_ovfl' +; CHECK-GCD-MIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-GCD-MIV-NEXT: da analyze - consistent output [*]! +; CHECK-GCD-MIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-GCD-MIV-NEXT: da analyze - consistent output [*|<]! +; CHECK-GCD-MIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-GCD-MIV-NEXT: da analyze - consistent output [*]! 
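+;
+; A minimal C sketch of the wrap-around above, assuming ordinary unsigned
+; 64-bit wrapping semantics (illustration only; FileCheck does not look at
+; these lines):
+;
+;   #include <assert.h>
+;   #include <stdint.h>
+;
+;   int main(void) {
+;     uint64_t m = 12297829382473034411ull; // multiplicative inverse of 3 mod 2^64
+;     uint64_t step = 3 * m;                // wraps around to exactly 1
+;     assert(step == 1);
+;     uint64_t offset0 = 4, offset1 = 0;
+;     for (uint64_t i = 0; i < 100; i++) {
+;       // Both streams reach index 6 at i == 2, so the two stores do alias.
+;       offset0 += step;                    // behaves like offset0 = i + 4
+;       offset1 += 3;
+;     }
+;     return 0;
+;   }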
+; +entry: + %step = mul i64 3, %m + br label %loop + +loop: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop ] + %offset.0 = phi i64 [ 4, %entry ] , [ %offset.0.next, %loop ] + %offset.1 = phi i64 [ 0, %entry ] , [ %offset.1.next, %loop ] + %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset.0 + %gep.1 = getelementptr inbounds i8, ptr %A, i64 %offset.1 + store i8 1, ptr %gep.0 + store i8 2, ptr %gep.1 + %i.inc = add nuw nsw i64 %i, 1 + %offset.0.next = add nsw i64 %offset.0, %step + %offset.1.next = add nsw i64 %offset.1, 3 + %ec = icmp eq i64 %i.inc, 100 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Analysis/DependenceAnalysis/strong-siv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/strong-siv-overflow.ll new file mode 100644 index 0000000..bf0fafc --- /dev/null +++ b/llvm/test/Analysis/DependenceAnalysis/strong-siv-overflow.ll @@ -0,0 +1,68 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL +; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=strong-siv 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-STRONG-SIV + +; for (i = 0; i < (1LL << 62); i++) { +; if (0 <= 2*i - 2) +; A[2*i - 2] = 1; +; +; if (0 <= 2*i - 4) +; A[2*i - 4] = 2; +; } +; +; FIXME: DependenceAnalysis currently detects no dependency between the two +; stores, but it does exist. For example, each store will access A[0] when i +; is 1 and 2 respectively. +; The root cause is that the product of the BTC and the coefficient +; ((1LL << 62) - 1 and 2) overflows in a signed sense. +define void @strongsiv_const_ovfl(ptr %A) { +; CHECK-LABEL: 'strongsiv_const_ovfl' +; CHECK-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-NEXT: da analyze - none! +; CHECK-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-NEXT: da analyze - none! +; CHECK-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-NEXT: da analyze - none! +; +entry: + br label %loop.header + +loop.header: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ] + %offset.0 = phi i64 [ -2, %entry ], [ %offset.0.next, %loop.latch ] + %offset.1 = phi i64 [ -4, %entry ], [ %offset.1.next, %loop.latch ] + %ec = icmp eq i64 %i, 4611686018427387904 + br i1 %ec, label %exit, label %loop.body + +loop.body: + %cond.0 = icmp sge i64 %offset.0, 0 + %cond.1 = icmp sge i64 %offset.1, 0 + br i1 %cond.0, label %if.then.0, label %loop.middle + +if.then.0: + %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset.0 + store i8 1, ptr %gep.0 + br label %loop.middle + +loop.middle: + br i1 %cond.1, label %if.then.1, label %loop.latch + +if.then.1: + %gep.1 = getelementptr inbounds i8, ptr %A, i64 %offset.1 + store i8 2, ptr %gep.1 + br label %loop.latch + +loop.latch: + %i.inc = add nuw nsw i64 %i, 1 + %offset.0.next = add nsw i64 %offset.0, 2 + %offset.1.next = add nsw i64 %offset.1, 2 + br label %loop.header + +exit: + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK-ALL: {{.*}} +; CHECK-STRONG-SIV: {{.*}} diff --git a/llvm/test/Analysis/DependenceAnalysis/symbolic-rdiv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/symbolic-rdiv-overflow.ll new file mode 100644 index 0000000..c5ff988 --- /dev/null +++ b/llvm/test/Analysis/DependenceAnalysis/symbolic-rdiv-overflow.ll @@ -0,0 +1,137 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL +; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=symbolic-rdiv 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-SYMBOLIC-RDIV + +; for (i = 0; i < (1LL << 62); i++) { +; if (0 <= 2*i - 2) +; A[2*i - 2] = 1; +; A[i] = 2; +; } +; +; FIXME: DependenceAnalysis currently detects no dependency between the two +; stores, but it does exist. For example, each store will access A[0] when i +; is 1 and 0 respectively. +; The root cause is that the product of the BTC and the coefficient +; ((1LL << 62) - 1 and 2) overflows in a signed sense. +define void @symbolicrdiv_prod_ovfl(ptr %A) { +; CHECK-ALL-LABEL: 'symbolicrdiv_prod_ovfl' +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; +; CHECK-SYMBOLIC-RDIV-LABEL: 'symbolicrdiv_prod_ovfl' +; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - none! +; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - none! +; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - consistent output [*]! +; +entry: + br label %loop.header + +loop.header: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ] + %offset = phi i64 [ -2, %entry ], [ %offset.next, %loop.latch ] + %ec = icmp eq i64 %i, 4611686018427387904 + br i1 %ec, label %exit, label %loop.body + +loop.body: + %cond = icmp sge i64 %offset, 0 + br i1 %cond, label %if.then, label %loop.latch + +if.then: + %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset + store i8 1, ptr %gep.0 + br label %loop.latch + +loop.latch: + %gep.1 = getelementptr inbounds i8, ptr %A, i64 %i + store i8 2, ptr %gep.1 + %i.inc = add nuw nsw i64 %i, 1 + %offset.next = add nsw i64 %offset, 2 + br label %loop.header + +exit: + ret void +} + +; offset0 = -4611686018427387904; // -2^62 +; offset1 = 4611686018427387904; // 2^62 +; for (i = 0; i < (1LL << 62) - 100; i++) { +; if (0 <= offset0) +; A[offset0] = 1; +; if (0 <= offset1) +; A[offset1] = 2; +; offset0 += 2; +; offset1 -= 1; +; } +; +; FIXME: DependenceAnalysis currently detects no dependency between the two +; stores, but it does exist. 
For example, +; +; memory access | i == 2^61 | i == 2^61 + 2^59 | i == 2^61 + 2^60 +; -------------------------|-----------|------------------|------------------- +; A[2*i - 2^62] (offset0) | | A[2^60] | A[2^61] +; A[-i + 2^62] (offset1) | A[2^61] | | A[2^60] +; +; The root cause is that the calculation of the difference between the two +; constants (-2^62 and 2^62) overflows in a signed sense. +define void @symbolicrdiv_delta_ovfl(ptr %A) { +; CHECK-ALL-LABEL: 'symbolicrdiv_delta_ovfl' +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; +; CHECK-SYMBOLIC-RDIV-LABEL: 'symbolicrdiv_delta_ovfl' +; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - consistent output [*]! +; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - none! +; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - consistent output [*]! +; +entry: + br label %loop.header + +loop.header: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ] + %offset.0 = phi i64 [ -4611686018427387904, %entry ], [ %offset.0.next, %loop.latch ] + %offset.1 = phi i64 [ 4611686018427387904, %entry ], [ %offset.1.next, %loop.latch ] + %cond.0 = icmp sge i64 %offset.0, 0 + %cond.1 = icmp sge i64 %offset.1, 0 + br i1 %cond.0, label %if.then.0, label %loop.middle + +if.then.0: + %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset.0 + store i8 1, ptr %gep.0 + br label %loop.middle + +loop.middle: + br i1 %cond.1, label %if.then.1, label %loop.latch + +if.then.1: + %gep.1 = getelementptr inbounds i8, ptr %A, i64 %offset.1 + store i8 2, ptr %gep.1 + br label %loop.latch + +loop.latch: + %i.inc = add nuw nsw i64 %i, 1 + %offset.0.next = add nsw i64 %offset.0, 2 + %offset.1.next = sub nsw i64 %offset.1, 1 + %ec = icmp eq i64 %i.inc, 4611686018427387804 ; 2^62 - 100 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Analysis/DependenceAnalysis/weak-crossing-siv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/weak-crossing-siv-overflow.ll new file mode 100644 index 0000000..ba57c7b --- /dev/null +++ b/llvm/test/Analysis/DependenceAnalysis/weak-crossing-siv-overflow.ll @@ -0,0 +1,125 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL +; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=weak-crossing-siv 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-WEAK-CROSSING-SIV + +; max_i = INT64_MAX/3 // 3074457345618258602 +; for (long long i = 0; i <= max_i; i++) { +; A[-3*i + INT64_MAX] = 0; +; if (i) +; A[3*i - 2] = 1; +; } +; +; FIXME: DependenceAnalysis currently detects no dependency between +; `A[-3*i + INT64_MAX]` and `A[3*i - 2]`, but it does exist.
For example, +; +; memory access | i == 1 | i == max_i +; ---------------------|------------------|------------------ +; A[-3*i + INT64_MAX] | A[INT64_MAX - 3] | A[1] +; A[3*i - 2] | A[1] | A[INT64_MAX - 3] +; +; The root cause is that the calculation of the difference between the two +; constants (INT64_MAX and -2) triggers an overflow. + +define void @weakcorssing_delta_ovfl(ptr %A) { +; CHECK-ALL-LABEL: 'weakcorssing_delta_ovfl' +; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; +; CHECK-WEAK-CROSSING-SIV-LABEL: 'weakcorssing_delta_ovfl' +; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1 +; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]! +; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - none! +; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]! +; +entry: + br label %loop.header + +loop.header: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ] + %subscript.0 = phi i64 [ 9223372036854775807, %entry ], [ %subscript.0.next, %loop.latch ] + %subscript.1 = phi i64 [ -2, %entry ], [ %subscript.1.next, %loop.latch ] + %idx.0 = getelementptr inbounds i8, ptr %A, i64 %subscript.0 + store i8 0, ptr %idx.0 + %cond.store = icmp ne i64 %i, 0 + br i1 %cond.store, label %if.store, label %loop.latch + +if.store: + %idx.1 = getelementptr inbounds i8, ptr %A, i64 %subscript.1 + store i8 1, ptr %idx.1 + br label %loop.latch + +loop.latch: + %i.inc = add nuw nsw i64 %i, 1 + %subscript.0.next = add nsw i64 %subscript.0, -3 + %subscript.1.next = add nsw i64 %subscript.1, 3 + %ec = icmp sgt i64 %i.inc, 3074457345618258602 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +; max_i = INT64_MAX/3 // 3074457345618258602 +; for (long long i = 0; i <= max_i; i++) { +; A[-3*i + INT64_MAX] = 0; +; A[3*i + 1] = 1; +; } +; +; FIXME: DependenceAnalysis currently detects no dependency between +; `A[-3*i + INT64_MAX]` and `A[3*i + 1]`, but it does exist. For example, +; +; memory access | i == 0 | i == 1 | i == max_i - 1 | i == max_i +; ---------------------|--------|------------------|------------------|------------------ +; A[-3*i + INT64_MAX] | | A[INT64_MAX - 3] | | A[1] +; A[3*i + 1] | A[1] | | A[INT64_MAX - 3] | +; +; The root cause is that the product of the BTC, the coefficient, and 2 +; triggers an overflow. +; +define void @weakcorssing_prod_ovfl(ptr %A) { +; CHECK-ALL-LABEL: 'weakcorssing_prod_ovfl' +; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-ALL-NEXT: da analyze - none!
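+;
+; A back-of-the-envelope C check of the product mentioned in the comment
+; above (assumes a compiler with the __int128 extension; illustration only,
+; FileCheck does not look at these lines):
+;
+;   #include <stdint.h>
+;   #include <stdio.h>
+;
+;   int main(void) {
+;     int64_t btc = 3074457345618258602;     // max_i, i.e. INT64_MAX/3
+;     __int128 prod = (__int128)2 * 3 * btc; // 18446744073709551612
+;     printf("%d\n", prod > (__int128)INT64_MAX); // prints 1: too big for int64_t
+;     return 0;
+;   }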
+; +; CHECK-WEAK-CROSSING-SIV-LABEL: 'weakcorssing_prod_ovfl' +; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1 +; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]! +; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - none! +; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1 +; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]! +; +entry: + br label %loop + +loop: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop ] + %subscript.0 = phi i64 [ 9223372036854775807, %entry ], [ %subscript.0.next, %loop ] + %subscript.1 = phi i64 [ 1, %entry ], [ %subscript.1.next, %loop ] + %idx.0 = getelementptr inbounds i8, ptr %A, i64 %subscript.0 + %idx.1 = getelementptr inbounds i8, ptr %A, i64 %subscript.1 + store i8 0, ptr %idx.0 + store i8 1, ptr %idx.1 + %i.inc = add nuw nsw i64 %i, 1 + %subscript.0.next = add nsw i64 %subscript.0, -3 + %subscript.1.next = add nsw i64 %subscript.1, 3 + %ec = icmp sgt i64 %i.inc, 3074457345618258602 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Analysis/DependenceAnalysis/weak-zero-siv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/weak-zero-siv-overflow.ll new file mode 100644 index 0000000..6317c38 --- /dev/null +++ b/llvm/test/Analysis/DependenceAnalysis/weak-zero-siv-overflow.ll @@ -0,0 +1,122 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL +; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=weak-zero-siv 2>&1 \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-WEAK-ZERO-SIV + +; for (i = 0; i < (1LL << 62); i++) { +; if (0 <= 2*i - 2) +; A[2*i - 2] = 1; +; A[2] = 2; +; } +; +; FIXME: DependenceAnalysis currently detects no dependency between the two +; stores, but it does exist. The root cause is that the product of the BTC and +; the coefficient ((1LL << 62) - 1 and 2) overflows in a signed sense. +; +define void @weakzero_dst_siv_prod_ovfl(ptr %A) { +; CHECK-ALL-LABEL: 'weakzero_dst_siv_prod_ovfl' +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - consistent output [S]! +; +; CHECK-WEAK-ZERO-SIV-LABEL: 'weakzero_dst_siv_prod_ovfl' +; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [*]! +; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - none! +; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [S]! 
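+;
+; The dependence the FIXME above describes can be reproduced with a small C
+; model (illustration only; FileCheck does not look at these lines): the
+; guarded store A[2*i - 2] and the unconditional store A[2] hit the same
+; element at i == 2.
+;
+;   #include <assert.h>
+;
+;   int main(void) {
+;     char A[8] = {0};
+;     for (long long i = 0; i < 4; i++) { // a few iterations suffice
+;       if (0 <= 2*i - 2)
+;         A[2*i - 2] = 1;
+;       A[2] = 2;                         // rewrites A[2] every iteration
+;     }
+;     assert(A[2] == 2);                  // both stores wrote A[2]; last one wins
+;     return 0;
+;   }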
+; +entry: + br label %loop.header + +loop.header: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ] + %offset = phi i64 [ -2, %entry ], [ %offset.next, %loop.latch ] + %ec = icmp eq i64 %i, 4611686018427387904 + br i1 %ec, label %exit, label %loop.body + +loop.body: + %cond = icmp sge i64 %offset, 0 + br i1 %cond, label %if.then, label %loop.latch + +if.then: + %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset + store i8 1, ptr %gep.0 + br label %loop.latch + +loop.latch: + %gep.1 = getelementptr inbounds i8, ptr %A, i64 2 + store i8 2, ptr %gep.1 + %i.inc = add nuw nsw i64 %i, 1 + %offset.next = add nsw i64 %offset, 2 + br label %loop.header + +exit: + ret void +} + +; for (i = 0; i < n; i++) { +; if (0 <= 2*i - 1) +; A[2*i - 1] = 1; +; A[INT64_MAX] = 2; +; } +; +; FIXME: DependenceAnalysis currently detects no dependency between the two +; stores, but it does exist. When `%n` is 2^62, the value of `%offset` will be +; the same as INT64_MAX at the last iteration. +; The root cause is that the calculation of the difference between the two +; constants (INT64_MAX and -1) overflows in a signed sense. +; +define void @weakzero_dst_siv_delta_ovfl(ptr %A, i64 %n) { +; CHECK-ALL-LABEL: 'weakzero_dst_siv_delta_ovfl' +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - none! +; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-ALL-NEXT: da analyze - consistent output [S]! +; +; CHECK-WEAK-ZERO-SIV-LABEL: 'weakzero_dst_siv_delta_ovfl' +; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1 +; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [*]! +; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - none! +; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1 +; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [S]! +; +entry: + %guard = icmp sgt i64 %n, 0 + br i1 %guard, label %loop.header, label %exit + +loop.header: + %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ] + %offset = phi i64 [ -1, %entry ], [ %offset.next, %loop.latch ] + %ec = icmp eq i64 %i, %n + br i1 %ec, label %exit, label %loop.body + +loop.body: + %cond = icmp sge i64 %offset, 0 + br i1 %cond, label %if.then, label %loop.latch + +if.then: + %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset + store i8 1, ptr %gep.0 + br label %loop.latch + +loop.latch: + %gep.1 = getelementptr inbounds i8, ptr %A, i64 9223372036854775807 + store i8 2, ptr %gep.1 + %i.inc = add nuw nsw i64 %i, 1 + %offset.next = add nsw i64 %offset, 2 + br label %loop.header + +exit: + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated.
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Analysis/HashRecognize/cyclic-redundancy-check.ll b/llvm/test/Analysis/HashRecognize/cyclic-redundancy-check.ll index 7dec2f8..78b4139 100644 --- a/llvm/test/Analysis/HashRecognize/cyclic-redundancy-check.ll +++ b/llvm/test/Analysis/HashRecognize/cyclic-redundancy-check.ll @@ -1448,4 +1448,85 @@ exit: ; preds = %loop ret i16 %crc.next } +define i16 @not.crc.data.next.outside.user(i16 %crc.init, i16 %data.init) { +; CHECK-LABEL: 'not.crc.data.next.outside.user' +; CHECK-NEXT: Did not find a hash algorithm +; CHECK-NEXT: Reason: Recurrences have stray uses +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %crc = phi i16 [ %crc.init, %entry ], [ %crc.next, %loop ] + %data = phi i16 [ %data.init, %entry ], [ %data.next, %loop ] + %xor.crc.data = xor i16 %data, %crc + %crc.shl = shl i16 %crc, 1 + %crc.xor = xor i16 %crc.shl, 3 + %check.sb = icmp slt i16 %xor.crc.data, 0 + %crc.next = select i1 %check.sb, i16 %crc.xor, i16 %crc.shl + %data.next = shl i16 %data, 1 + %iv.next = add nuw nsw i32 %iv, 1 + %exit.cond = icmp samesign ult i32 %iv, 7 + br i1 %exit.cond, label %loop, label %exit + +exit: + %ret = xor i16 %data.next, %crc.next + ret i16 %ret +} + +define i16 @not.crc.data.phi.outside.user(i16 %crc.init, i16 %data.init) { +; CHECK-LABEL: 'not.crc.data.phi.outside.user' +; CHECK-NEXT: Did not find a hash algorithm +; CHECK-NEXT: Reason: Recurrences have stray uses +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %crc = phi i16 [ %crc.init, %entry ], [ %crc.next, %loop ] + %data = phi i16 [ %data.init, %entry ], [ %data.next, %loop ] + %xor.crc.data = xor i16 %data, %crc + %crc.shl = shl i16 %crc, 1 + %crc.xor = xor i16 %crc.shl, 3 + %check.sb = icmp slt i16 %xor.crc.data, 0 + %crc.next = select i1 %check.sb, i16 %crc.xor, i16 %crc.shl + %data.next = shl i16 %data, 1 + %iv.next = add nuw nsw i32 %iv, 1 + %exit.cond = icmp samesign ult i32 %iv, 7 + br i1 %exit.cond, label %loop, label %exit + +exit: + %ret = xor i16 %data, %crc.next + ret i16 %ret +} + +define i16 @not.crc.crc.phi.outside.user(i16 %crc.init, i16 %data.init) { +; CHECK-LABEL: 'not.crc.crc.phi.outside.user' +; CHECK-NEXT: Did not find a hash algorithm +; CHECK-NEXT: Reason: Recurrences have stray uses +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %crc = phi i16 [ %crc.init, %entry ], [ %crc.next, %loop ] + %data = phi i16 [ %data.init, %entry ], [ %data.next, %loop ] + %xor.crc.data = xor i16 %data, %crc + %crc.shl = shl i16 %crc, 1 + %crc.xor = xor i16 %crc.shl, 3 + %check.sb = icmp slt i16 %xor.crc.data, 0 + %crc.next = select i1 %check.sb, i16 %crc.xor, i16 %crc.shl + %data.next = shl i16 %data, 1 + %iv.next = add nuw nsw i32 %iv, 1 + %exit.cond = icmp samesign ult i32 %iv, 7 + br i1 %exit.cond, label %loop, label %exit + +exit: + %ret = xor i16 %crc, %crc.next + ret i16 %ret +} + declare i16 @side.effect() diff --git a/llvm/test/Assembler/constant-getelementptr-scalable_pointee.ll b/llvm/test/Assembler/constant-getelementptr-scalable_pointee.ll new file mode 100644 index 0000000..d390399 --- /dev/null +++ b/llvm/test/Assembler/constant-getelementptr-scalable_pointee.ll @@ -0,0 +1,8 @@ +; RUN: not llvm-as < %s 2>&1 | FileCheck %s +; Test the case of an invalid pointee type on a constant GEP + +; CHECK: invalid base element for constant getelementptr + +define ptr @test_scalable_vector_gep(ptr %a) { + ret ptr getelementptr (<vscale 
x 1 x i8>, ptr @a, i64 1) +} diff --git a/llvm/test/Bitcode/dbg-data-size-roundtrip.ll b/llvm/test/Bitcode/dbg-data-size-roundtrip.ll new file mode 100644 index 0000000..36a9253 --- /dev/null +++ b/llvm/test/Bitcode/dbg-data-size-roundtrip.ll @@ -0,0 +1,19 @@ +; RUN: opt %s -o - -S | llvm-as - | llvm-dis - | FileCheck %s + +; CHECK: !DIBasicType(name: "unsigned _BitInt", size: 32, dataSize: 17, encoding: DW_ATE_unsigned) + +@a = global i8 0, align 1, !dbg !0 + +!llvm.dbg.cu = !{!2} +!llvm.module.flags = !{!6, !7} +!llvm.ident = !{!8} + +!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression()) +!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !3, line: 4, type: !5, isLocal: false, isDefinition: true) +!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !3, producer: "clang version 22.0.0git", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: None) +!3 = !DIFile(filename: "bit-int.c", directory: "/") +!4 = !{!0} +!5 = !DIBasicType(name: "unsigned _BitInt", size: 32, dataSize: 17, encoding: DW_ATE_unsigned) +!6 = !{i32 2, !"Debug Info Version", i32 3} +!7 = !{i32 1, !"wchar_size", i32 4} +!8 = !{!"clang version 22.0.0git"} diff --git a/llvm/test/Bitcode/dwarf-objc-property.ll b/llvm/test/Bitcode/dwarf-objc-property.ll new file mode 100644 index 0000000..f054f57 --- /dev/null +++ b/llvm/test/Bitcode/dwarf-objc-property.ll @@ -0,0 +1,46 @@ +; RUN: llvm-as < %s | llvm-dis | FileCheck %s +; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s + +; CHECK: !DIObjCProperty(name: "autoSynthProp", file: !3, line: 5, attributes: 2316, type: !8) +; CHECK: !DIObjCProperty(name: "synthProp", file: !3, line: 6, attributes: 2316, type: !8) +; CHECK: !DIObjCProperty(name: "customGetterProp", file: !3, line: 7, getter: "customGetter", attributes: 2318, type: !8) +; CHECK: !DIObjCProperty(name: "customSetterProp", file: !3, line: 8, setter: "customSetter:", attributes: 2444, type: !8) +; CHECK: !DIObjCProperty(name: "customAccessorsProp", file: !3, line: 9, setter: "customSetter:", getter: "customGetter", attributes: 2446, type: !8) + +!llvm.module.flags = !{!0, !1} +!llvm.dbg.cu = !{!2} + +!0 = !{i32 7, !"Dwarf Version", i32 5} +!1 = !{i32 2, !"Debug Info Version", i32 3} +!2 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !3, producer: "hand written", isOptimized: false, runtimeVersion: 2, emissionKind: FullDebug, retainedTypes: !4, splitDebugInlining: false, debugInfoForProfiling: true, nameTableKind: Apple) +!3 = !DIFile(filename: "main.m", directory: "/tmp") +!4 = !{!5} +!5 = !DICompositeType(tag: DW_TAG_structure_type, name: "Foo", scope: !3, file: !3, line: 1, size: 128, flags: DIFlagObjcClassComplete, elements: !6, runtimeLang: DW_LANG_ObjC) +!6 = !{!7, !9, !10, !11, !12, !13, !14, !15, !16, !17, !24, !27, !28, !29, !30, !31, !32} +!7 = !DIObjCProperty(name: "autoSynthProp", file: !3, line: 5, attributes: 2316, type: !8) +!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!9 = !DIObjCProperty(name: "synthProp", file: !3, line: 6, attributes: 2316, type: !8) +!10 = !DIObjCProperty(name: "customGetterProp", file: !3, line: 7, getter: "customGetter", attributes: 2318, type: !8) +!11 = !DIObjCProperty(name: "customSetterProp", file: !3, line: 8, setter: "customSetter:", attributes: 2444, type: !8) +!12 = !DIObjCProperty(name: "customAccessorsProp", file: !3, line: 9, setter: "customSetter:", getter: "customGetter", attributes: 2446, type: !8) +!13 = 
!DIDerivedType(tag: DW_TAG_member, name: "someBackingIvar", scope: !3, file: !3, line: 2, baseType: !8, size: 32, flags: DIFlagProtected, extraData: !9) +!14 = !DIDerivedType(tag: DW_TAG_member, name: "_autoSynthProp", scope: !3, file: !3, line: 5, baseType: !8, size: 32, flags: DIFlagPrivate, extraData: !7) +!15 = !DIDerivedType(tag: DW_TAG_member, name: "_customGetterProp", scope: !3, file: !3, line: 7, baseType: !8, size: 32, flags: DIFlagPrivate, extraData: !10) +!16 = !DIDerivedType(tag: DW_TAG_member, name: "_customSetterProp", scope: !3, file: !3, line: 8, baseType: !8, size: 32, flags: DIFlagPrivate, extraData: !11) +!17 = !DISubprogram(name: "-[Foo customGetter]", scope: !5, file: !3, line: 19, type: !18, scopeLine: 19, flags: DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!18 = !DISubroutineType(types: !19) +!19 = !{!8, !20, !21} +!20 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !5, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer) +!21 = !DIDerivedType(tag: DW_TAG_typedef, name: "SEL", file: !3, baseType: !22, flags: DIFlagArtificial) +!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 64) +!23 = !DICompositeType(tag: DW_TAG_structure_type, name: "objc_selector", file: !3, flags: DIFlagFwdDecl) +!24 = !DISubprogram(name: "-[Foo customSetter:]", scope: !5, file: !3, line: 23, type: !25, scopeLine: 23, flags: DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!25 = !DISubroutineType(types: !26) +!26 = !{null, !20, !21, !8} +!27 = !DISubprogram(name: "-[Foo synthProp]", scope: !5, file: !3, line: 17, type: !18, scopeLine: 17, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!28 = !DISubprogram(name: "-[Foo setSynthProp:]", scope: !5, file: !3, line: 17, type: !25, scopeLine: 17, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!29 = !DISubprogram(name: "-[Foo autoSynthProp]", scope: !5, file: !3, line: 5, type: !18, scopeLine: 5, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!30 = !DISubprogram(name: "-[Foo setAutoSynthProp:]", scope: !5, file: !3, line: 5, type: !25, scopeLine: 5, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!31 = !DISubprogram(name: "-[Foo setCustomGetterProp:]", scope: !5, file: !3, line: 7, type: !25, scopeLine: 7, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!32 = !DISubprogram(name: "-[Foo customSetterProp]", scope: !5, file: !3, line: 8, type: !18, scopeLine: 8, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) + diff --git a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll index b58f6ba..330f27b 100644 --- a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll +++ b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll @@ -1,22 +1,38 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=aarch64-linux-gnu -O3 < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -O3 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-linux-gnu -O3 -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI ; This used to miscompile: ; The 16-bit -1 should not become 32-bit -1 (sub w8, w8, #1). 
@g = global i16 0, align 4 define i32 @srl_and() { -; CHECK-LABEL: srl_and: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: adrp x8, :got:g -; CHECK-NEXT: mov w9, #50 -; CHECK-NEXT: ldr x8, [x8, :got_lo12:g] -; CHECK-NEXT: ldrh w8, [x8] -; CHECK-NEXT: eor w8, w8, w9 -; CHECK-NEXT: mov w9, #65535 -; CHECK-NEXT: add w8, w8, w9 -; CHECK-NEXT: and w0, w8, w8, lsr #16 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: srl_and: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: adrp x8, :got:g +; CHECK-SD-NEXT: mov w9, #50 // =0x32 +; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:g] +; CHECK-SD-NEXT: ldrh w8, [x8] +; CHECK-SD-NEXT: eor w8, w8, w9 +; CHECK-SD-NEXT: mov w9, #65535 // =0xffff +; CHECK-SD-NEXT: add w8, w8, w9 +; CHECK-SD-NEXT: and w0, w8, w8, lsr #16 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: srl_and: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: adrp x8, :got:g +; CHECK-GI-NEXT: mov w9, #50 // =0x32 +; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:g] +; CHECK-GI-NEXT: ldrh w8, [x8] +; CHECK-GI-NEXT: eor w8, w8, w9 +; CHECK-GI-NEXT: mov w9, #65535 // =0xffff +; CHECK-GI-NEXT: add w8, w9, w8, uxth +; CHECK-GI-NEXT: and w9, w8, #0xffff +; CHECK-GI-NEXT: cmp w8, w9 +; CHECK-GI-NEXT: cset w8, ne +; CHECK-GI-NEXT: and w0, w9, w8 +; CHECK-GI-NEXT: ret entry: %0 = load i16, ptr @g, align 4 %1 = xor i16 %0, 50 @@ -29,3 +45,5 @@ entry: ret i32 %and } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/eor3.ll b/llvm/test/CodeGen/AArch64/eor3.ll index eccd091..594a73f 100644 --- a/llvm/test/CodeGen/AArch64/eor3.ll +++ b/llvm/test/CodeGen/AArch64/eor3.ll @@ -277,3 +277,154 @@ define <2 x i64> @eor3_vnot(<2 x i64> %0, <2 x i64> %1) { ret <2 x i64> %4 } +define <1 x i64> @eor3_1x64(<1 x i64> %0, <1 x i64> %1, <1 x i64> %2) { +; SHA3-LABEL: eor3_1x64: +; SHA3: // %bb.0: +; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-NEXT: ret +; +; NOSHA3-LABEL: eor3_1x64: +; NOSHA3: // %bb.0: +; NOSHA3-NEXT: eor v1.8b, v1.8b, v2.8b +; NOSHA3-NEXT: eor v0.8b, v1.8b, v0.8b +; NOSHA3-NEXT: ret +; +; SVE2-LABEL: eor3_1x64: +; SVE2: // %bb.0: +; SVE2-NEXT: // kill: def $d1 killed $d1 def $z1 +; SVE2-NEXT: // kill: def $d2 killed $d2 def $z2 +; SVE2-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE2-NEXT: eor3 z1.d, z1.d, z2.d, z0.d +; SVE2-NEXT: fmov d0, d1 +; SVE2-NEXT: ret +; +; SHA3-SVE2-LABEL: eor3_1x64: +; SHA3-SVE2: // %bb.0: +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-SVE2-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-SVE2-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-SVE2-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-SVE2-NEXT: ret + %4 = xor <1 x i64> %1, %2 + %5 = xor <1 x i64> %4, %0 + ret <1 x i64> %5 +} + +define <2 x i32> @eor3_2x32(<2 x i32> %0, <2 x i32> %1, <2 x i32> %2) { +; SHA3-LABEL: eor3_2x32: +; SHA3: // %bb.0: +; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-NEXT: ret +; +; NOSHA3-LABEL: eor3_2x32: +; NOSHA3: // %bb.0: +; NOSHA3-NEXT: eor v1.8b, v1.8b, v2.8b +; NOSHA3-NEXT: eor v0.8b, v1.8b, v0.8b +; 
NOSHA3-NEXT: ret +; +; SVE2-LABEL: eor3_2x32: +; SVE2: // %bb.0: +; SVE2-NEXT: // kill: def $d1 killed $d1 def $z1 +; SVE2-NEXT: // kill: def $d2 killed $d2 def $z2 +; SVE2-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE2-NEXT: eor3 z1.d, z1.d, z2.d, z0.d +; SVE2-NEXT: fmov d0, d1 +; SVE2-NEXT: ret +; +; SHA3-SVE2-LABEL: eor3_2x32: +; SHA3-SVE2: // %bb.0: +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-SVE2-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-SVE2-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-SVE2-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-SVE2-NEXT: ret + %4 = xor <2 x i32> %1, %2 + %5 = xor <2 x i32> %4, %0 + ret <2 x i32> %5 +} + +define <4 x i16> @eor3_4x16(<4 x i16> %0, <4 x i16> %1, <4 x i16> %2) { +; SHA3-LABEL: eor3_4x16: +; SHA3: // %bb.0: +; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-NEXT: ret +; +; NOSHA3-LABEL: eor3_4x16: +; NOSHA3: // %bb.0: +; NOSHA3-NEXT: eor v1.8b, v1.8b, v2.8b +; NOSHA3-NEXT: eor v0.8b, v1.8b, v0.8b +; NOSHA3-NEXT: ret +; +; SVE2-LABEL: eor3_4x16: +; SVE2: // %bb.0: +; SVE2-NEXT: // kill: def $d1 killed $d1 def $z1 +; SVE2-NEXT: // kill: def $d2 killed $d2 def $z2 +; SVE2-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE2-NEXT: eor3 z1.d, z1.d, z2.d, z0.d +; SVE2-NEXT: fmov d0, d1 +; SVE2-NEXT: ret +; +; SHA3-SVE2-LABEL: eor3_4x16: +; SHA3-SVE2: // %bb.0: +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-SVE2-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-SVE2-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-SVE2-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-SVE2-NEXT: ret + %4 = xor <4 x i16> %1, %2 + %5 = xor <4 x i16> %4, %0 + ret <4 x i16> %5 +} + +define <8 x i8> @eor3_8x8(<8 x i8> %0, <8 x i8> %1, <8 x i8> %2) { +; SHA3-LABEL: eor3_8x8: +; SHA3: // %bb.0: +; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-NEXT: ret +; +; NOSHA3-LABEL: eor3_8x8: +; NOSHA3: // %bb.0: +; NOSHA3-NEXT: eor v1.8b, v1.8b, v2.8b +; NOSHA3-NEXT: eor v0.8b, v1.8b, v0.8b +; NOSHA3-NEXT: ret +; +; SVE2-LABEL: eor3_8x8: +; SVE2: // %bb.0: +; SVE2-NEXT: // kill: def $d1 killed $d1 def $z1 +; SVE2-NEXT: // kill: def $d2 killed $d2 def $z2 +; SVE2-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE2-NEXT: eor3 z1.d, z1.d, z2.d, z0.d +; SVE2-NEXT: fmov d0, d1 +; SVE2-NEXT: ret +; +; SHA3-SVE2-LABEL: eor3_8x8: +; SHA3-SVE2: // %bb.0: +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 def $q0 +; SHA3-SVE2-NEXT: // kill: def $d2 killed $d2 def $q2 +; SHA3-SVE2-NEXT: // kill: def $d1 killed $d1 def $q1 +; SHA3-SVE2-NEXT: eor3 v0.16b, v1.16b, v2.16b, v0.16b +; SHA3-SVE2-NEXT: // kill: def $d0 killed $d0 killed $q0 +; SHA3-SVE2-NEXT: ret + %4 = xor <8 x i8> %1, %2 + %5 = xor <8 x i8> %4, %0 + ret <8 x i8> %5 +} diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll index c3fdc7d..8438f0b0 100644 --- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll +++ 
b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefix=CHECK +; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI ; We are looking for the following pattern here: ; (X & (C l>> Y)) ==/!= 0 @@ -13,12 +14,21 @@ ; i8 scalar define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind { -; CHECK-LABEL: scalar_i8_signbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0x80 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i8_signbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x80 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i8_signbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #128 // =0x80 +; CHECK-GI-NEXT: and w9, w1, #0xff +; CHECK-GI-NEXT: lsr w8, w8, w9 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i8 128, %y %t1 = and i8 %t0, %x %res = icmp eq i8 %t1, 0 @@ -26,12 +36,21 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind { } define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind { -; CHECK-LABEL: scalar_i8_lowestbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0x1 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i8_lowestbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x1 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i8_lowestbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #1 // =0x1 +; CHECK-GI-NEXT: and w9, w1, #0xff +; CHECK-GI-NEXT: lsr w8, w8, w9 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i8 1, %y %t1 = and i8 %t0, %x %res = icmp eq i8 %t1, 0 @@ -39,12 +58,21 @@ define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind { } define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind { -; CHECK-LABEL: scalar_i8_bitsinmiddle_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0x18 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i8_bitsinmiddle_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x18 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i8_bitsinmiddle_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #24 // =0x18 +; CHECK-GI-NEXT: and w9, w1, #0xff +; CHECK-GI-NEXT: lsr w8, w8, w9 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i8 24, %y %t1 = and i8 %t0, %x %res = icmp eq i8 %t1, 0 @@ -54,12 +82,21 @@ define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind { ; i16 scalar define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind { -; CHECK-LABEL: scalar_i16_signbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0x8000 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i16_signbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x8000 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i16_signbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #32768 // =0x8000 +; 
CHECK-GI-NEXT: and w9, w1, #0xffff +; CHECK-GI-NEXT: lsr w8, w8, w9 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i16 32768, %y %t1 = and i16 %t0, %x %res = icmp eq i16 %t1, 0 @@ -67,12 +104,21 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind { } define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind { -; CHECK-LABEL: scalar_i16_lowestbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0x1 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i16_lowestbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x1 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i16_lowestbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #1 // =0x1 +; CHECK-GI-NEXT: and w9, w1, #0xffff +; CHECK-GI-NEXT: lsr w8, w8, w9 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i16 1, %y %t1 = and i16 %t0, %x %res = icmp eq i16 %t1, 0 @@ -80,12 +126,21 @@ define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind { } define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind { -; CHECK-LABEL: scalar_i16_bitsinmiddle_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0xff0 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i16_bitsinmiddle_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0xff0 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i16_bitsinmiddle_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #4080 // =0xff0 +; CHECK-GI-NEXT: and w9, w1, #0xffff +; CHECK-GI-NEXT: lsr w8, w8, w9 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i16 4080, %y %t1 = and i16 %t0, %x %res = icmp eq i16 %t1, 0 @@ -95,12 +150,20 @@ define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind { ; i32 scalar define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: scalar_i32_signbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0x80000000 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i32_signbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x80000000 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i32_signbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #-2147483648 // =0x80000000 +; CHECK-GI-NEXT: lsr w8, w8, w1 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i32 2147483648, %y %t1 = and i32 %t0, %x %res = icmp eq i32 %t1, 0 @@ -108,12 +171,20 @@ define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind { } define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: scalar_i32_lowestbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0x1 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i32_lowestbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x1 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i32_lowestbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #1 // =0x1 +; CHECK-GI-NEXT: lsr w8, w8, w1 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i32 1, %y %t1 = and i32 %t0, %x %res = icmp eq i32 %t1, 0 @@ -121,12 +192,20 @@ define i1 
@scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind { } define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: scalar_i32_bitsinmiddle_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: tst w8, #0xffff00 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i32_bitsinmiddle_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0xffff00 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i32_bitsinmiddle_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #16776960 // =0xffff00 +; CHECK-GI-NEXT: lsr w8, w8, w1 +; CHECK-GI-NEXT: tst w8, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i32 16776960, %y %t1 = and i32 %t0, %x %res = icmp eq i32 %t1, 0 @@ -136,12 +215,20 @@ define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind { ; i64 scalar define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scalar_i64_signbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: tst x8, #0x8000000000000000 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i64_signbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl x8, x0, x1 +; CHECK-SD-NEXT: tst x8, #0x8000000000000000 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i64_signbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000 +; CHECK-GI-NEXT: lsr x8, x8, x1 +; CHECK-GI-NEXT: tst x8, x0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i64 9223372036854775808, %y %t1 = and i64 %t0, %x %res = icmp eq i64 %t1, 0 @@ -149,12 +236,20 @@ define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind { } define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scalar_i64_lowestbit_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: tst x8, #0x1 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i64_lowestbit_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl x8, x0, x1 +; CHECK-SD-NEXT: tst x8, #0x1 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i64_lowestbit_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #1 // =0x1 +; CHECK-GI-NEXT: lsr x8, x8, x1 +; CHECK-GI-NEXT: tst x8, x0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i64 1, %y %t1 = and i64 %t0, %x %res = icmp eq i64 %t1, 0 @@ -162,12 +257,20 @@ define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind { } define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scalar_i64_bitsinmiddle_eq: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: tst x8, #0xffffffff0000 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_i64_bitsinmiddle_eq: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl x8, x0, x1 +; CHECK-SD-NEXT: tst x8, #0xffffffff0000 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_i64_bitsinmiddle_eq: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #281474976645120 // =0xffffffff0000 +; CHECK-GI-NEXT: lsr x8, x8, x1 +; CHECK-GI-NEXT: tst x8, x0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret %t0 = lshr i64 281474976645120, %y %t1 = and i64 %t0, %x %res = icmp eq i64 %t1, 0 @@ -179,14 +282,24 @@ define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind { ;------------------------------------------------------------------------------; define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind { 
-; CHECK-LABEL: vec_4xi32_splat_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_splat_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_splat_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
 %t1 = and <4 x i32> %t0, %x
 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
@@ -211,44 +324,86 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
 }
 define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef0_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: mov v2.s[1], w8
+; CHECK-GI-NEXT: mov v2.s[3], w8
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
 %t1 = and <4 x i32> %t0, %x
 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
 ret <4 x i1> %res
 }
 define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef1_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: neg v1.4s, v1.4s
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: neg v1.4s, v1.4s
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi d2, #0000000000000000
+; CHECK-GI-NEXT: movi v3.4s, #1
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
 %t1 = and <4 x i32> %t0, %x
 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
 ret <4 x i1> %res
 }
 define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef2_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: neg v1.4s, v1.4s
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: neg v1.4s, v1.4s
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: movi d2, #0000000000000000
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: fmov s3, w8
+; CHECK-GI-NEXT: mov v3.s[1], w8
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
 %t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
 %t1 = and <4 x i32> %t0, %x
 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
@@ -260,11 +415,20 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
 ;------------------------------------------------------------------------------;
 define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_ne:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: ubfx w0, w8, #7, #1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_ne:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: ubfx w0, w8, #7, #1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_ne:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #128 // =0x80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
 %t0 = lshr i8 128, %y
 %t1 = and i8 %t0, %x
 %res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
@@ -315,14 +479,24 @@ define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 }
 define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_eq_with_nonzero:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #128 // =0x80
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: cmp w8, #1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #128 // =0x80
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: and w8, w8, w0
+; CHECK-SD-NEXT: cmp w8, #1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #128 // =0x80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: cmp w8, #1
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = lshr i8 128, %y
 %t1 = and i8 %t0, %x
 %res = icmp eq i8 %t1, 1 ; should be comparing with 0
diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index 4a73b10..cc1bf27 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 ; We are looking for the following pattern here:
 ; (X & (C << Y)) ==/!= 0
@@ -13,13 +14,23 @@
 ; i8 scalar
 define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x80
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x80
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i8 128, %y
 %t1 = and i8 %t0, %x
 %res = icmp eq i8 %t1, 0
@@ -27,13 +38,23 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 }
 define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i8 1, %y
 %t1 = and i8 %t0, %x
 %res = icmp eq i8 %t1, 0
@@ -41,13 +62,23 @@ define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
 }
 define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x18
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x18
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #24 // =0x18
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i8 24, %y
 %t1 = and i8 %t0, %x
 %res = icmp eq i8 %t1, 0
@@ -57,13 +88,23 @@ define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
 ; i16 scalar
 define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x8000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x8000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-32768 // =0xffff8000
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xffff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i16 32768, %y
 %t1 = and i16 %t0, %x
 %res = icmp eq i16 %t1, 0
@@ -71,13 +112,23 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 }
 define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xffff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i16 1, %y
 %t1 = and i16 %t0, %x
 %res = icmp eq i16 %t1, 0
@@ -85,13 +136,23 @@ define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
 }
 define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0xff0
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0xff0
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4080 // =0xff0
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xffff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i16 4080, %y
 %t1 = and i16 %t0, %x
 %res = icmp eq i16 %t1, 0
@@ -101,12 +162,20 @@ define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
 ; i32 scalar
 define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: tst w8, #0x80000000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x80000000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-2147483648 // =0x80000000
+; CHECK-GI-NEXT: lsl w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i32 2147483648, %y
 %t1 = and i32 %t0, %x
 %res = icmp eq i32 %t1, 0
@@ -114,12 +183,20 @@ define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
 }
 define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: lsl w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i32 1, %y
 %t1 = and i32 %t0, %x
 %res = icmp eq i32 %t1, 0
@@ -127,12 +204,20 @@ define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
 }
 define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: tst w8, #0xffff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0xffff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #16776960 // =0xffff00
+; CHECK-GI-NEXT: lsl w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i32 16776960, %y
 %t1 = and i32 %t0, %x
 %res = icmp eq i32 %t1, 0
@@ -142,12 +227,20 @@ define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
 ; i64 scalar
 define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: tst x8, #0x8000000000000000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0x8000000000000000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-GI-NEXT: lsl x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i64 9223372036854775808, %y
 %t1 = and i64 %t0, %x
 %res = icmp eq i64 %t1, 0
@@ -155,12 +248,20 @@ define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
 }
 define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: tst x8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: lsl x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i64 1, %y
 %t1 = and i64 %t0, %x
 %res = icmp eq i64 %t1, 0
@@ -168,12 +269,20 @@ define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
 }
 define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: tst x8, #0xffffffff0000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0xffffffff0000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #281474976645120 // =0xffffffff0000
+; CHECK-GI-NEXT: lsl x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %t0 = shl i64 281474976645120, %y
 %t1 = and i64 %t0, %x
 %res = icmp eq i64 %t1, 0
@@ -216,42 +325,81 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
 }
 define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef0_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: mov v2.s[1], w8
+; CHECK-GI-NEXT: mov v2.s[3], w8
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
 %t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
 %t1 = and <4 x i32> %t0, %x
 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
 ret <4 x i1> %res
 }
 define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef1_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi d3, #0000000000000000
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v3.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
 %t0 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
 %t1 = and <4 x i32> %t0, %x
 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
 ret <4 x i1> %res
 }
 define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef2_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: movi d3, #0000000000000000
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: mov v2.s[1], w8
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: mov v2.s[3], w8
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v3.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
 %t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
 %t1 = and <4 x i32> %t0, %x
 %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
@@ -263,12 +411,22 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
 ;------------------------------------------------------------------------------;
 define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_ne:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: lsr w0, w8, #7
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_ne:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: lsr w0, w8, #7
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_ne:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
 %t0 = shl i8 128, %y
 %t1 = and i8 %t0, %x
 %res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
@@ -310,13 +468,24 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 }
 define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_bitsinmiddle_slt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #24 // =0x18
-; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: ubfx w0, w8, #7, #1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_bitsinmiddle_slt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #24 // =0x18
+; CHECK-SD-NEXT: lsl w8, w8, w1
+; CHECK-SD-NEXT: and w8, w8, w0
+; CHECK-SD-NEXT: ubfx w0, w8, #7, #1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_bitsinmiddle_slt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #24 // =0x18
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cset w0, mi
+; CHECK-GI-NEXT: ret
 %t0 = shl i8 24, %y
 %t1 = and i8 %t0, %x
 %res = icmp slt i8 %t1, 0
@@ -324,15 +493,20 @@ define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 }
 define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_eq_with_nonzero:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-128 // =0xffffff80
-; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: and w8, w8, #0x80
-; CHECK-NEXT: cmp w8, #1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-SD-NEXT: lsl w8, w8, w1
+; CHECK-SD-NEXT: and w8, w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0x80
+; CHECK-SD-NEXT: cmp w8, #1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: ret
 %t0 = shl i8 128, %y
 %t1 = and i8 %t0, %x
 %res = icmp eq i8 %t1, 1 ; should be comparing with 0
diff --git a/llvm/test/CodeGen/AArch64/signbit-test.ll b/llvm/test/CodeGen/AArch64/signbit-test.ll
index c74a934..298495b 100644
--- a/llvm/test/CodeGen/AArch64/signbit-test.ll
+++ b/llvm/test/CodeGen/AArch64/signbit-test.ll
@@ -1,13 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+; RUN: llc -mtriple=aarch64-- < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-- -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 define i64 @test_clear_mask_i64_i32(i64 %x) nounwind {
-; CHECK-LABEL: test_clear_mask_i64_i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42 // =0x2a
-; CHECK-NEXT: cmn w0, #1
-; CHECK-NEXT: csel x0, x8, x0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_clear_mask_i64_i32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w8, #42 // =0x2a
+; CHECK-SD-NEXT: cmn w0, #1
+; CHECK-SD-NEXT: csel x0, x8, x0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_clear_mask_i64_i32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #42 // =0x2a
+; CHECK-GI-NEXT: tst x0, #0x80000000
+; CHECK-GI-NEXT: csel x0, x8, x0, eq
+; CHECK-GI-NEXT: ret
 entry:
 %a = and i64 %x, 2147483648
 %r = icmp eq i64 %a, 0
diff --git a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
index 7c80f93..fc01c6b 100644
--- a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 ; https://bugs.llvm.org/show_bug.cgi?id=38149
@@ -19,13 +20,22 @@
 ; ---------------------------------------------------------------------------- ;
 define i1 @shifts_eqcmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: shifts_eqcmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, w0, uxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shifts_eqcmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sxtb w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0xffff
+; CHECK-SD-NEXT: cmp w8, w0, uxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shifts_eqcmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, w0, uxth
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
 %tmp0 = shl i16 %x, 8 ; 16-8
 %tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
 %tmp2 = icmp eq i16 %tmp1, %x
@@ -97,26 +107,43 @@ define i1 @shifts_eqcmp_i64_i8(i64 %x) nounwind {
 ; ---------------------------------------------------------------------------- ;
 define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, #128
-; CHECK-NEXT: lsr w8, w8, #8
-; CHECK-NEXT: cmp w8, #254
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: sub w8, w8, #128
+; CHECK-SD-NEXT: lsr w8, w8, #8
+; CHECK-SD-NEXT: cmp w8, #254
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: mov w9, #65280 // =0xff00
+; CHECK-GI-NEXT: add w8, w8, w0, uxth
+; CHECK-GI-NEXT: cmp w8, w9
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
 %tmp1 = icmp uge i16 %tmp0, -256 ; ~0U << 8
 ret i1 %tmp1
 }
 define i1 @add_ugecmp_i32_i16_i8(i16 %xx) nounwind {
-; CHECK-LABEL: add_ugecmp_i32_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: cmp w8, w8, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i32_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: cmp w8, w8, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i32_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: add w8, w8, w0, uxth
+; CHECK-GI-NEXT: cmn w8, #256
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
 %x = zext i16 %xx to i32
 %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
 %tmp1 = icmp uge i32 %tmp0, -256 ; ~0U << 8
@@ -124,55 +151,92 @@ define i1 @add_ugecmp_i32_i16_i8(i16 %xx) nounwind {
 }
 define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i32_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i32_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i32_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub w8, w0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmn w8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
 %tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
 %tmp1 = icmp uge i32 %tmp0, -65536 ; ~0U << 16
 ret i1 %tmp1
 }
 define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i32_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i32_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i32_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub w8, w0, #128
+; CHECK-GI-NEXT: cmn w8, #256
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
 %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
 %tmp1 = icmp uge i32 %tmp0, -256 ; ~0U << 8
 ret i1 %tmp1
 }
 define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i64_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i64_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i64_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-2147483648 // =0xffffffff80000000
+; CHECK-GI-NEXT: mov x9, #-4294967296 // =0xffffffff00000000
+; CHECK-GI-NEXT: add x8, x0, x8
+; CHECK-GI-NEXT: cmp x8, x9
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
 %tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
 %tmp1 = icmp uge i64 %tmp0, -4294967296 ; ~0U << 32
 ret i1 %tmp1
 }
 define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i64_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i64_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i64_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub x8, x0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmn x8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
 %tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
 %tmp1 = icmp uge i64 %tmp0, -65536 ; ~0U << 16
 ret i1 %tmp1
 }
 define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i64_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i64_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i64_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub x8, x0, #128
+; CHECK-GI-NEXT: cmn x8, #256
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
 %tmp0 = add i64 %x, -128 ; ~0U << (8-1)
 %tmp1 = icmp uge i64 %tmp0, -256 ; ~0U << 8
 ret i1 %tmp1
@@ -180,14 +244,23 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
 ; Slightly more canonical variant
 define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ugtcmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, #128
-; CHECK-NEXT: lsr w8, w8, #8
-; CHECK-NEXT: cmp w8, #254
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugtcmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: sub w8, w8, #128
+; CHECK-SD-NEXT: lsr w8, w8, #8
+; CHECK-SD-NEXT: cmp w8, #254
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugtcmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: mov w9, #65279 // =0xfeff
+; CHECK-GI-NEXT: add w8, w8, w0, uxth
+; CHECK-GI-NEXT: cmp w8, w9
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
 %tmp1 = icmp ugt i16 %tmp0, -257 ; ~0U << 8 - 1
 ret i1 %tmp1
@@ -198,68 +271,113 @@ define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
 ; ---------------------------------------------------------------------------- ;
 define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, w0, uxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sxtb w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0xffff
+; CHECK-SD-NEXT: cmp w8, w0, uxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, 128 ; 1U << (8-1)
 %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
 ret i1 %tmp1
 }
 define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i32_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i32_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i32_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmp w8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
 %tmp1 = icmp ult i32 %tmp0, 65536 ; 1U << 16
 ret i1 %tmp1
 }
 define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i32_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i32_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i32_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i32 %x, 128 ; 1U << (8-1)
 %tmp1 = icmp ult i32 %tmp0, 256 ; 1U << 8
 ret i1 %tmp1
 }
 define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i64_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i64_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i64_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-2147483648 // =0x80000000
+; CHECK-GI-NEXT: mov x9, #4294967296 // =0x100000000
+; CHECK-GI-NEXT: add x8, x0, x8
+; CHECK-GI-NEXT: cmp x8, x9
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
 %tmp1 = icmp ult i64 %tmp0, 4294967296 ; 1U << 32
 ret i1 %tmp1
 }
 define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i64_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i64_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i64_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmp x8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
 %tmp1 = icmp ult i64 %tmp0, 65536 ; 1U << 16
 ret i1 %tmp1
 }
 define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i64_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i64_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i64_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #128
+; CHECK-GI-NEXT: cmp x8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i64 %x, 128 ; 1U << (8-1)
 %tmp1 = icmp ult i64 %tmp0, 256 ; 1U << 8
 ret i1 %tmp1
@@ -267,13 +385,21 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
 ; Slightly more canonical variant
 define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ulecmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, w0, uxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ulecmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sxtb w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0xffff
+; CHECK-SD-NEXT: cmp w8, w0, uxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ulecmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #255
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, 128 ; 1U << (8-1)
 %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
 ret i1 %tmp1
@@ -284,12 +410,20 @@ define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
 ; Adding not a constant
 define i1 @add_ultcmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i8_add:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: tst w8, #0xff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i8_add:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0xff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i8_add:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, %y
 %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
 ret i1 %tmp1
@@ -311,12 +445,20 @@ define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 ; Second constant is not larger than the first one
 define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i8_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: add w8, w8, #128
-; CHECK-NEXT: lsr w0, w8, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i8_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: add w8, w8, #128
+; CHECK-SD-NEXT: lsr w0, w8, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i8_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xffff
+; CHECK-GI-NEXT: add w8, w8, #128
+; CHECK-GI-NEXT: cmp w8, w8, uxth
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, 128 ; 1U << (8-1)
 %tmp1 = icmp ult i16 %tmp0, 128 ; 1U << (8-1)
 ret i1 %tmp1
@@ -324,12 +466,20 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
 ; First constant is not power of two
 define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #192
-; CHECK-NEXT: tst w8, #0xff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #192
+; CHECK-SD-NEXT: tst w8, #0xff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #192
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
 %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
 ret i1 %tmp1
@@ -351,12 +501,20 @@ define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; Magic check fails, 64 << 1 != 256
 define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i8_magic:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #64
-; CHECK-NEXT: tst w8, #0xff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i8_magic:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #64
+; CHECK-SD-NEXT: tst w8, #0xff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i8_magic:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #64
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
 %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
 ret i1 %tmp1
@@ -364,12 +522,20 @@ define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; Bad 'destination type'
 define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i4:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #8
-; CHECK-NEXT: tst w8, #0xfff0
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #8
+; CHECK-SD-NEXT: tst w8, #0xfff0
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #8
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #16
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i16 %x, 8 ; 1U << (4-1)
 %tmp1 = icmp ult i16 %tmp0, 16 ; 1U << 4
 ret i1 %tmp1
@@ -377,12 +543,20 @@ define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
 ; Bad storage type
 define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i24_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #128
-; CHECK-NEXT: tst w8, #0xffff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i24_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #128
+; CHECK-SD-NEXT: tst w8, #0xffff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i24_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: and w8, w8, #0xffffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
 %tmp0 = add i24 %x, 128 ; 1U << (8-1)
 %tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
 ret i1 %tmp1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
new file mode 100644
index 0000000..e117200
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
@@ -0,0 +1,612 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s
+
+define i16 @s_add_i16(i16 inreg %a, i16 inreg %b) {
+; GFX7-LABEL: s_add_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_i32 s16, s16, s17
+; GFX7-NEXT: v_mov_b32_e32 v0, s16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_i32 s16, s16, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s16, s16, s17
+; GFX8-NEXT: v_mov_b32_e32 v0, s16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_i32 s16, s16, s17
+; GFX10-NEXT: v_mov_b32_e32 v0, s16
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_i32 s0, s0, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+define i16 @v_add_i16(i16 %a, i16 %b) {
+; GFX7-LABEL: v_add_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u16 v0.l, v0.l, v1.l
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+define i32 @s_add_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_add_i32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_i32 s16, s16, s17
+; GFX7-NEXT: v_mov_b32_e32 v0, s16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_i32 s16, s16, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s16, s16, s17
+; GFX8-NEXT: v_mov_b32_e32 v0, s16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_i32 s16, s16, s17
+; GFX10-NEXT: v_mov_b32_e32 v0, s16
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_i32 s0, s0, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i32 %a, %b
+ ret i32 %c
+}
+
+define i32 @v_add_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_add_i32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i32 %a, %b
+ ret i32 %c
+}
+
+define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GFX7-LABEL: s_add_v2i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_i32 s16, s16, s18
+; GFX7-NEXT: s_add_i32 s17, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s16
+; GFX7-NEXT: v_mov_b32_e32 v1, s17
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_lshr_b32 s4, s16, 16
+; GFX9-NEXT: s_lshr_b32 s5, s17, 16
+; GFX9-NEXT: s_add_i32 s16, s16, s17
+; GFX9-NEXT: s_add_i32 s4, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s16, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_lshr_b32 s4, s16, 16
+; GFX8-NEXT: s_lshr_b32 s5, s17, 16
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s16, s16, s17
+; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
+; GFX8-NEXT: s_and_b32 s5, 0xffff, s16
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s4, s5, s4
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_lshr_b32 s4, s16, 16
+; GFX10-NEXT: s_lshr_b32 s5, s17, 16
+; GFX10-NEXT: s_add_i32 s16, s16, s17
+; GFX10-NEXT: s_add_i32 s4, s4, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s16, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_lshr_b32 s2, s0, 16
+; GFX11-NEXT: s_lshr_b32 s3, s1, 16
+; GFX11-NEXT: s_add_i32 s0, s0, s1
+; GFX11-NEXT: s_add_i32 s2, s2, s3
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_v2i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_lshr_b32 s2, s0, 16
+; GFX12-NEXT: s_lshr_b32 s3, s1, 16
+; GFX12-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s2, s2, s3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add <2 x i16> %a, %b
+ ret <2 x i16> %c
+}
+
+define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
+; GFX7-LABEL: v_add_v2i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_v2i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add <2 x i16> %a, %b
+ ret <2 x i16> %c
+}
+
+define i64 @s_add_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_add_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_u32 s4, s16, s18
+; GFX7-NEXT: s_addc_u32 s5, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_u32 s4, s16, s18
+; GFX9-NEXT: s_addc_u32 s5, s17, s19
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_u32 s4, s16, s18
+; GFX8-NEXT: s_addc_u32 s5, s17, s19
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_u32 s4, s16, s18
+; GFX10-NEXT: s_addc_u32 s5, s17, s19
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_u32 s0, s0, s2
+; GFX11-NEXT: s_addc_u32 s1, s1, s3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i64 %a, %b
+ ret i64 %c
+}
+
+define i64 @v_add_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_add_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i64 %a, %b
+ ret i64 %c
+}
+
+define void @s_uaddo_uadde(i64 inreg %a, i64 inreg %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: s_uaddo_uadde:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_u32 s4, s16, s18
+; GFX7-NEXT: s_addc_u32 s5, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v4, s4
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_cselect_b32 s8, 1, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, s5
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v0, s8
+; GFX7-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_uaddo_uadde:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_u32 s4, s16, s18
+; GFX9-NEXT: s_addc_u32 s5, s17, s19
+; GFX9-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: global_store_dword v[2:3], v0, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_uaddo_uadde:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_u32 s4, s16, s18
+; GFX8-NEXT: s_addc_u32 s5, s17, s19
+; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: flat_store_dword v[2:3], v0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_uaddo_uadde:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_u32 s4, s16, s18
+; GFX10-NEXT: s_addc_u32 s5, s17, s19
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v4, s4
+; GFX10-NEXT: v_mov_b32_e32 v5, s5
+; GFX10-NEXT: v_mov_b32_e32 v6, s6
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX10-NEXT: global_store_dword v[2:3], v6, off
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_uaddo_uadde:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_u32 s0, s0, s2
+; GFX11-NEXT: s_addc_u32 s1, s1, s3
+; GFX11-NEXT: s_cselect_b32 s2, 1, 0
+; GFX11-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX11-NEXT: v_mov_b32_e32 v6, s2
+; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: global_store_b32 v[2:3], v6, off
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_uaddo_uadde:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_co_u32 s0, s0, s2
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s3
+; GFX12-NEXT: s_cselect_b32 s2, 1, 0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX12-NEXT: v_mov_b32_e32 v6, s2
+; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT: global_store_b32 v[2:3], v6, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+ %add = extractvalue {i64, i1} %uaddo, 0
+ %of = extractvalue {i64, i1} %uaddo, 1
+ %of32 = select i1 %of, i32 1, i32 0
+ store i64 %add, ptr addrspace(1) %res
+ store i32 %of32, ptr addrspace(1) %carry
+ ret void
+}
+
+define void @v_uaddo_uadde(i64 %a, i64 %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: v_uaddo_uadde:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: buffer_store_dword v2, v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_uadde:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT: global_store_dword v[6:7], v2, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_uadde:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT: flat_store_dword v[6:7], v2
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddo_uadde:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT: global_store_dword v[6:7], v2, off
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_uaddo_uadde:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX11-NEXT: global_store_b64 v[4:5], v[0:1], off
+; GFX11-NEXT: global_store_b32 v[6:7], v2, off
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_uaddo_uadde:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX12-NEXT: global_store_b64 v[4:5], v[0:1], off
+; GFX12-NEXT: global_store_b32 v[6:7], v2, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+ %add = extractvalue {i64, i1} %uaddo, 0
+ %of = extractvalue {i64, i1} %uaddo, 1
+ %of32 = select i1 %of, i32 1, i32 0
+ store i64 %add, ptr addrspace(1) %res
+ store i32 %of32, ptr addrspace(1) %carry
+ ret void
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll
new file mode 100644
index 0000000..1a7ccf0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+
+define amdgpu_kernel void @fcmp_uniform_select(float %a, i32 %b, i32 %c, ptr addrspace(1) %out) {
+; GFX7-LABEL: fcmp_uniform_select:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x9
+; GFX7-NEXT: s_load_dword s3, s[4:5], 0xb
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_f32_e64 s[4:5], s6, 0
+; GFX7-NEXT: s_or_b64 s[4:5], s[4:5], s[4:5]
+; GFX7-NEXT: s_cselect_b32 s4, 1, 0
+; GFX7-NEXT: s_and_b32 s4, s4, 1
+; GFX7-NEXT: s_cmp_lg_u32 s4, 0
+; GFX7-NEXT: s_cselect_b32 s3, s7, s3
+; GFX7-NEXT: v_mov_b32_e32 v0, s3
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: fcmp_uniform_select:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_f32_e64 s[4:5], s0, 0
+; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0
+;
GFX8-NEXT: s_cselect_b32 s0, 1, 0
+; GFX8-NEXT: s_and_b32 s0, s0, 1
+; GFX8-NEXT: s_cmp_lg_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s0, s1, s6
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dword v[0:1], v2
+; GFX8-NEXT: s_endpgm
+;
+; GFX11-LABEL: fcmp_uniform_select:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x2c
+; GFX11-NEXT: s_load_b64 s[2:3], s[4:5], 0x34
+; GFX11-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_f32_e64 s0, s0, 0
+; GFX11-NEXT: s_cmp_lg_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, 1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_b32 s0, s0, 1
+; GFX11-NEXT: s_cmp_lg_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s1, s6
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: global_store_b32 v1, v0, s[2:3]
+; GFX11-NEXT: s_endpgm
+  %cmp = fcmp oeq float %a, 0.0
+  %sel = select i1 %cmp, i32 %b, i32 %c
+  store i32 %sel, ptr addrspace(1) %out
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir
new file mode 100644
index 0000000..67cc016
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir
@@ -0,0 +1,37 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -mcpu=gfx700 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX7 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx803 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX8 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX11 %s
+
+---
+name: test_copy_scc_vcc
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GFX7-LABEL: name: test_copy_scc_vcc
+    ; GFX7: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+    ; GFX7-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[DEF]], [[DEF]], implicit-def $scc
+    ; GFX7-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc
+    ; GFX7-NEXT: $sgpr0 = COPY [[COPY]]
+    ; GFX7-NEXT: S_ENDPGM 0, implicit $sgpr0
+    ;
+    ; GFX8-LABEL: name: test_copy_scc_vcc
+    ; GFX8: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+    ; GFX8-NEXT: S_CMP_LG_U64 [[DEF]], 0, implicit-def $scc
+    ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc
+    ; GFX8-NEXT: $sgpr0 = COPY [[COPY]]
+    ; GFX8-NEXT: S_ENDPGM 0, implicit $sgpr0
+    ;
+    ; GFX11-LABEL: name: test_copy_scc_vcc
+    ; GFX11: [[DEF:%[0-9]+]]:sreg_32_xm0_xexec = IMPLICIT_DEF
+    ; GFX11-NEXT: S_CMP_LG_U32 [[DEF]], 0, implicit-def $scc
+    ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc
+    ; GFX11-NEXT: $sgpr0 = COPY [[COPY]]
+    ; GFX11-NEXT: S_ENDPGM 0, implicit $sgpr0
+    %0:vcc(s1) = G_IMPLICIT_DEF
+    %1:sgpr(s32) = G_AMDGPU_COPY_SCC_VCC %0
+    $sgpr0 = COPY %1
+    S_ENDPGM 0, implicit $sgpr0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll index 7714c03..d3e2118 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll @@ -113,9 +113,9 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 -; CHECK-NEXT: s_cmp_eq_u32 s0, 0 +; CHECK-NEXT: s_xor_b32 s0, s0, 1 +; CHECK-NEXT: s_and_b32 s0, s0, 1 +; CHECK-NEXT: s_cmp_lg_u32 s0, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB8_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 @@ -161,16 +161,17 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 +; CHECK-NEXT: s_xor_b32 s0, s0, 1 +; CHECK-NEXT: s_xor_b32 s0, s0, 1 +; CHECK-NEXT: s_and_b32 s0, s0, 1 ; CHECK-NEXT: s_cmp_lg_u32 s0, 0 -; CHECK-NEXT: s_cbranch_scc0 .LBB10_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB10_3 -; CHECK-NEXT: .LBB10_2: ; %true +; CHECK-NEXT: s_cbranch_scc1 .LBB10_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB10_3 +; CHECK-NEXT: .LBB10_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB10_3 ; CHECK-NEXT: .LBB10_3: %c = trunc i32 %v to i1 %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -208,11 +209,7 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_cmp_lt_u32 s0, 12 -; CHECK-NEXT: s_cselect_b32 s0, 1, 0 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 -; CHECK-NEXT: s_cmp_eq_u32 s0, 0 +; CHECK-NEXT: s_cmp_ge_u32 s0, 12 ; CHECK-NEXT: s_cbranch_scc1 .LBB12_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 @@ -258,17 +255,13 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare: ; CHECK: ; %bb.0: ; CHECK-NEXT: s_cmp_lt_u32 s0, 12 -; CHECK-NEXT: s_cselect_b32 s0, 1, 0 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 -; CHECK-NEXT: s_cmp_lg_u32 s0, 0 -; CHECK-NEXT: s_cbranch_scc0 .LBB14_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB14_3 -; CHECK-NEXT: .LBB14_2: ; %true +; CHECK-NEXT: s_cbranch_scc1 .LBB14_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB14_3 +; CHECK-NEXT: .LBB14_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB14_3 ; CHECK-NEXT: .LBB14_3: %c = icmp ult i32 %v, 12 %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -310,14 +303,12 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_cmp_lt_u32 s0, 12 +; CHECK-NEXT: s_cmp_ge_u32 s0, 12 ; CHECK-NEXT: s_cselect_b32 s0, 1, 0 -; CHECK-NEXT: s_cmp_gt_u32 s1, 34 +; CHECK-NEXT: s_cmp_le_u32 s1, 34 ; CHECK-NEXT: s_cselect_b32 s1, 1, 0 -; CHECK-NEXT: s_and_b32 s0, s0, s1 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 
s0, 0, s0 -; CHECK-NEXT: s_cmp_eq_u32 s0, 0 +; CHECK-NEXT: s_or_b32 s0, s0, s1 +; CHECK-NEXT: s_cmp_lg_u32 s0, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB16_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 @@ -372,16 +363,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg ; CHECK-NEXT: s_cmp_gt_u32 s1, 34 ; CHECK-NEXT: s_cselect_b32 s1, 1, 0 ; CHECK-NEXT: s_and_b32 s0, s0, s1 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 ; CHECK-NEXT: s_cmp_lg_u32 s0, 0 -; CHECK-NEXT: s_cbranch_scc0 .LBB18_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB18_3 -; CHECK-NEXT: .LBB18_2: ; %true +; CHECK-NEXT: s_cbranch_scc1 .LBB18_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB18_3 +; CHECK-NEXT: .LBB18_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB18_3 ; CHECK-NEXT: .LBB18_3: %v1c = icmp ult i32 %v1, 12 %v2c = icmp ugt i32 %v2, 34 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll index 7b81669..250fbc7 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll @@ -116,9 +116,9 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0 +; CHECK-NEXT: s_xor_b32 s0, s0, 1 +; CHECK-NEXT: s_and_b32 s0, s0, 1 +; CHECK-NEXT: s_cmp_lg_u32 s0, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB8_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 @@ -164,16 +164,17 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0 -; CHECK-NEXT: s_cbranch_scc0 .LBB10_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB10_3 -; CHECK-NEXT: .LBB10_2: ; %true +; CHECK-NEXT: s_xor_b32 s0, s0, 1 +; CHECK-NEXT: s_xor_b32 s0, s0, 1 +; CHECK-NEXT: s_and_b32 s0, s0, 1 +; CHECK-NEXT: s_cmp_lg_u32 s0, 0 +; CHECK-NEXT: s_cbranch_scc1 .LBB10_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB10_3 +; CHECK-NEXT: .LBB10_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB10_3 ; CHECK-NEXT: .LBB10_3: %c = trunc i32 %v to i1 %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c) @@ -211,11 +212,7 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_cmp_lt_u32 s0, 12 -; CHECK-NEXT: s_cselect_b32 s0, 1, 0 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0 +; CHECK-NEXT: s_cmp_ge_u32 s0, 12 ; CHECK-NEXT: s_cbranch_scc1 .LBB12_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 @@ -261,17 +258,13 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare: ; CHECK: ; %bb.0: ; CHECK-NEXT: s_cmp_lt_u32 s0, 12 -; CHECK-NEXT: s_cselect_b32 s0, 1, 0 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; 
CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0 -; CHECK-NEXT: s_cbranch_scc0 .LBB14_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB14_3 -; CHECK-NEXT: .LBB14_2: ; %true +; CHECK-NEXT: s_cbranch_scc1 .LBB14_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB14_3 +; CHECK-NEXT: .LBB14_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB14_3 ; CHECK-NEXT: .LBB14_3: %c = icmp ult i32 %v, 12 %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c) @@ -313,14 +306,12 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_cmp_lt_u32 s0, 12 +; CHECK-NEXT: s_cmp_ge_u32 s0, 12 ; CHECK-NEXT: s_cselect_b32 s0, 1, 0 -; CHECK-NEXT: s_cmp_gt_u32 s1, 34 +; CHECK-NEXT: s_cmp_le_u32 s1, 34 ; CHECK-NEXT: s_cselect_b32 s1, 1, 0 -; CHECK-NEXT: s_and_b32 s0, s0, s1 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0 +; CHECK-NEXT: s_or_b32 s0, s0, s1 +; CHECK-NEXT: s_cmp_lg_u32 s0, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB16_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 @@ -375,16 +366,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg ; CHECK-NEXT: s_cmp_gt_u32 s1, 34 ; CHECK-NEXT: s_cselect_b32 s1, 1, 0 ; CHECK-NEXT: s_and_b32 s0, s0, s1 -; CHECK-NEXT: s_and_b32 s0, 1, s0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0 -; CHECK-NEXT: s_cbranch_scc0 .LBB18_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB18_3 -; CHECK-NEXT: .LBB18_2: ; %true +; CHECK-NEXT: s_cmp_lg_u32 s0, 0 +; CHECK-NEXT: s_cbranch_scc1 .LBB18_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB18_3 +; CHECK-NEXT: .LBB18_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB18_3 ; CHECK-NEXT: .LBB18_3: %v1c = icmp ult i32 %v1, 12 %v2c = icmp ugt i32 %v2, 34 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir new file mode 100644 index 0000000..097372a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir @@ -0,0 +1,524 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s +--- +name: add_s16_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1 + ; CHECK-LABEL: name: add_s16_ss + ; CHECK: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC]](s16) + ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[ANYEXT]], [[ANYEXT1]] + ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ADD]](s32) + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_ADD %2, 
%3 + %5:_(s16) = G_AND %4, %4 +... + +--- +name: add_s16_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: add_s16_sv + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[COPY2]], [[TRUNC1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr0 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_ADD %2, %3 + %5:_(s16) = G_AND %4, %4 +... + +--- +name: add_s16_vs +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: add_s16_vs + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[COPY2]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr0 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_ADD %2, %3 + %5:_(s16) = G_AND %4, %4 +... + +--- +name: add_s16_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; CHECK-LABEL: name: add_s16_vv + ; CHECK: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[TRUNC1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_ADD %2, %3 + %5:_(s16) = G_AND %4, %4 +... + +--- +name: add_s32_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1 + ; CHECK-LABEL: name: add_s32_ss + ; CHECK: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[ADD]], [[ADD]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = G_ADD %0, %1 + %3:_(s32) = G_AND %2, %2 +... + +--- +name: add_s32_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: add_s32_sv + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY2]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr0 + %2:_(s32) = G_ADD %0, %1 + %3:_(s32) = G_AND %2, %2 +... 
+ +--- +name: add_s32_vs +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: add_s32_vs + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr0 + %2:_(s32) = G_ADD %0, %1 + %3:_(s32) = G_AND %2, %2 +... + +--- +name: add_s32_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; CHECK-LABEL: name: add_s32_vv + ; CHECK: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = G_ADD %0, %1 + %3:_(s32) = G_AND %2, %2 +... + +--- +name: add_s64_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0_sgpr1, $sgpr2_sgpr3 + ; CHECK-LABEL: name: add_s64_ss + ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3 + ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s64) = G_ADD [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 255 + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s64) = G_AND [[ADD]], [[ADD]] + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s64) = COPY $sgpr2_sgpr3 + %2:_(s64) = G_ADD %0, %1 + %3:_(s64) = G_CONSTANT i64 255 + %4:_(s64) = G_AND %2, %2 +... + +--- +name: add_s64_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-LABEL: name: add_s64_sv + ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY2]], [[COPY1]] + ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) + ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] + ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s64) = COPY $vgpr0_vgpr1 + %2:_(s64) = G_ADD %0, %1 + %3:_(s64) = G_AND %2, %2 +... 
+ +--- +name: add_s64_vs +legalized: true + +body: | + bb.0: + liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-LABEL: name: add_s64_vs + ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY1]](s64) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) + ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] + ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s64) = COPY $sgpr0_sgpr1 + %2:_(s64) = G_ADD %0, %1 + %3:_(s64) = G_AND %2, %2 +... + +--- +name: add_s64_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 + ; CHECK-LABEL: name: add_s64_vv + ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3 + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) + ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] + ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s64) = COPY $vgpr2_vgpr3 + %2:_(s64) = G_ADD %0, %1 + %3:_(s64) = G_AND %2, %2 +... + +--- +name: uaddo_s32_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1 + ; CHECK-LABEL: name: uaddo_s32_ss + ; CHECK: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[UADDO:%[0-9]+]]:sgpr(s32), [[UADDO1:%[0-9]+]]:sgpr(s32) = G_UADDO [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[UADDO1]], [[C]] + ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[SELECT]], [[UADDO]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32), %3:_(s1) = G_UADDO %0, %1 + %4:_(s32) = G_ZEXT %3 + %5:_(s32) = G_AND %4, %2 +... 
+ +--- +name: uaddo_s32_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr1 + ; CHECK-LABEL: name: uaddo_s32_sv + ; CHECK: liveins: $sgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) + ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY2]], [[COPY1]] + ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32), %3:_(s1) = G_UADDO %0, %1 + %4:_(s32) = G_ZEXT %3 + %5:_(s32) = G_AND %2, %4 +... + +--- +name: uaddo_s32_vs +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $sgpr1 + ; CHECK-LABEL: name: uaddo_s32_vs + ; CHECK: liveins: $vgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) + ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32), %3:_(s1) = G_UADDO %0, %1 + %4:_(s32) = G_ZEXT %3 + %5:_(s32) = G_AND %2, %4 +... + +--- +name: uaddo_s32_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; CHECK-LABEL: name: uaddo_s32_vv + ; CHECK: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32), %3:_(s1) = G_UADDO %0, %1 + %4:_(s32) = G_ZEXT %3 + %5:_(s32) = G_AND %2, %4 +... 
+ +--- +name: uadde_s32_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1, $sgpr2 + ; CHECK-LABEL: name: uadde_s32_ss + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]] + ; CHECK-NEXT: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:sgpr(s32) = G_UADDE [[COPY]], [[COPY1]], [[AND]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE1]], [[C]] + ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND1]](s32), [[C]], [[C1]] + ; CHECK-NEXT: [[AND2:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE]], [[SELECT]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 + %6:_(s32) = G_ZEXT %5 + %7:_(s32) = G_AND %4, %6 +... + +--- +name: uadde_s32_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr1, $sgpr2 + ; CHECK-LABEL: name: uadde_s32_sv + ; CHECK: liveins: $sgpr0, $vgpr1, $sgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) + ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) + ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY3]], [[COPY1]], [[AMDGPU_COPY_VCC_SCC]] + ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 + %6:_(s32) = G_ZEXT %5 + %7:_(s32) = G_AND %4, %6 +... + +--- +name: uadde_s32_vs +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $sgpr1, $sgpr2 + ; CHECK-LABEL: name: uadde_s32_vs + ; CHECK: liveins: $vgpr0, $sgpr1, $sgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) + ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) + ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY3]], [[AMDGPU_COPY_VCC_SCC]] + ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 + %6:_(s32) = G_ZEXT %5 + %7:_(s32) = G_AND %4, %6 +... 
+ +--- +name: uadde_s32_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK-LABEL: name: uadde_s32_vv + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 + ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY2]], [[C]] + ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[AND]](s32), [[C1]] + ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY1]], [[ICMP]] + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 + %6:_(s32) = G_ZEXT %5 + %7:_(s32) = G_AND %4, %6 +... + +--- +name: uadde_s32_ss_scc_use +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1, $sgpr2 + ; CHECK-LABEL: name: uadde_s32_ss_scc_use + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]] + ; CHECK-NEXT: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:sgpr(s32) = G_UADDE [[COPY]], [[COPY1]], [[AND]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE1]], [[C]] + ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND1]](s32), [[C]], [[C1]] + ; CHECK-NEXT: [[AND2:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE]], [[SELECT]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 + %6:_(s32) = G_ZEXT %5 + %8:_(s32) = G_AND %4, %6 +... 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir index 54ee69f..30c958f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s +# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s --- name: add_s16_ss legalized: true @@ -19,13 +18,13 @@ body: | ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16) ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[ANYEXT]], [[ANYEXT1]] ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ADD]](s32) - ; CHECK-NEXT: S_ENDPGM 0, implicit [[TRUNC2]](s16) + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s16) = G_TRUNC %0 %3:_(s16) = G_TRUNC %1 %4:_(s16) = G_ADD %2, %3 - S_ENDPGM 0, implicit %4 + %5:_(s16) = G_AND %4, %4 ... --- @@ -44,13 +43,13 @@ body: | ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16) ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[COPY2]], [[TRUNC1]] - ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $vgpr0 %2:_(s16) = G_TRUNC %0 %3:_(s16) = G_TRUNC %1 %4:_(s16) = G_ADD %2, %3 - S_ENDPGM 0, implicit %4 + %5:_(s16) = G_AND %4, %4 ... --- @@ -69,13 +68,13 @@ body: | ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32) ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16) ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[COPY2]] - ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $sgpr0 %2:_(s16) = G_TRUNC %0 %3:_(s16) = G_TRUNC %1 %4:_(s16) = G_ADD %2, %3 - S_ENDPGM 0, implicit %4 + %5:_(s16) = G_AND %4, %4 ... --- @@ -93,11 +92,11 @@ body: | ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[TRUNC1]] - ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s16) = G_TRUNC %0 %3:_(s16) = G_TRUNC %1 %4:_(s16) = G_ADD %2, %3 - S_ENDPGM 0, implicit %4 + %5:_(s16) = G_AND %4, %4 ... 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir index 97018fa..01eb391 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir @@ -1,6 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s --- name: add_v2s16_ss @@ -18,16 +17,19 @@ body: | ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 ; CHECK-NEXT: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32) ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>) - ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 - ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C1]](s32) + ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C]](s32) ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[BITCAST]], [[BITCAST1]] ; CHECK-NEXT: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[LSHR]], [[LSHR1]] ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ADD]](s32), [[ADD1]](s32) - ; CHECK-NEXT: S_ENDPGM 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>) + ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 + ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16) + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(<2 x s16>) = G_AND [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR]] %0:_(<2 x s16>) = COPY $sgpr0 %1:_(<2 x s16>) = COPY $sgpr1 %2:_(<2 x s16>) = G_ADD %0, %1 - S_ENDPGM 0, implicit %2 + %3:_(s16) = G_CONSTANT i16 255 + %4:_(<2 x s16>) = G_BUILD_VECTOR %3, %3 + %5:_(<2 x s16>) = G_AND %2, %4 ... --- @@ -44,11 +46,11 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>) ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY2]], [[COPY1]] - ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](<2 x s16>) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]] %0:_(<2 x s16>) = COPY $sgpr0 %1:_(<2 x s16>) = COPY $vgpr0 %2:_(<2 x s16>) = G_ADD %0, %1 - S_ENDPGM 0, implicit %2 + %3:_(<2 x s16>) = G_AND %2, %2 ... --- @@ -65,9 +67,11 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>) ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]] %0:_(<2 x s16>) = COPY $vgpr0 %1:_(<2 x s16>) = COPY $sgpr0 %2:_(<2 x s16>) = G_ADD %0, %1 + %3:_(<2 x s16>) = G_AND %2, %2 ... 
--- @@ -83,9 +87,9 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1 ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY]], [[COPY1]] - ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](<2 x s16>) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]] %0:_(<2 x s16>) = COPY $vgpr0 %1:_(<2 x s16>) = COPY $vgpr1 %2:_(<2 x s16>) = G_ADD %0, %1 - S_ENDPGM 0, implicit %2 + %3:_(<2 x s16>) = G_AND %2, %2 ... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir index 7378c93..e0e783e 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir @@ -77,10 +77,14 @@ body: | ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C1]], [[C2]] ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) + ; CHECK-NEXT: [[C3:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C3]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s1) = G_ICMP intpred(eq), %0, %1 %3:_(s16) = G_SEXT %2 + %4:_(s16) = G_CONSTANT i16 255 + %5:_(s16) = G_AND %3, %4 ... --- @@ -215,9 +219,13 @@ body: | ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C1]], [[C2]] ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) + ; CHECK-NEXT: [[C3:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C3]] %0:_(s32) = COPY $sgpr0 %1:_(s1) = G_TRUNC %0 %2:_(s16) = G_SEXT %1 + %3:_(s16) = G_CONSTANT i16 255 + %4:_(s16) = G_AND %2, %3 ... 
--- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir index b0199d3..e3c01c0 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir @@ -1,5 +1,107 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass="amdgpu-regbankselect,amdgpu-regbanklegalize" %s -o - | FileCheck %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s + +--- +name: sub_s16_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1 + ; CHECK-LABEL: name: sub_s16_ss + ; CHECK: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC]](s16) + ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[ANYEXT]], [[ANYEXT1]] + ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SUB]](s32) + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_SUB %2, %3 + %6:_(s16) = G_AND %4, %4 +... + +--- +name: sub_s16_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: sub_s16_sv + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[COPY2]], [[TRUNC1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr0 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_SUB %2, %3 + %6:_(s16) = G_AND %4, %4 +... + +--- +name: sub_s16_vs +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: sub_s16_vs + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[TRUNC]], [[COPY2]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr0 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_SUB %2, %3 + %6:_(s16) = G_AND %4, %4 +... 
+ +--- +name: sub_s16_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; CHECK-LABEL: name: sub_s16_vv + ; CHECK: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[TRUNC]], [[TRUNC1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16) = G_SUB %2, %3 + %6:_(s16) = G_AND %4, %4 +... --- name: sub_s32_ss @@ -14,9 +116,11 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[SUB]], [[SUB]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s32) = G_SUB %0, %1 + %4:_(s32) = G_AND %2, %2 ... --- @@ -33,9 +137,11 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY2]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $vgpr0 %2:_(s32) = G_SUB %0, %1 + %4:_(s32) = G_AND %2, %2 ... --- @@ -52,9 +158,11 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]] %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $sgpr0 %2:_(s32) = G_SUB %0, %1 + %4:_(s32) = G_AND %2, %2 ... --- @@ -70,7 +178,376 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]] %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s32) = G_SUB %0, %1 + %4:_(s32) = G_AND %2, %2 +... + +--- +name: sub_v2s16_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1 + ; CHECK-LABEL: name: sub_v2s16_ss + ; CHECK: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr1 + ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>) + ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK-NEXT: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>) + ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[BITCAST]], [[BITCAST1]] + ; CHECK-NEXT: [[SUB1:%[0-9]+]]:sgpr(s32) = G_SUB [[LSHR]], [[LSHR1]] + ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[SUB]](s32), [[SUB1]](s32) + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(<2 x s16>) = G_AND [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC]] + %0:_(<2 x s16>) = COPY $sgpr0 + %1:_(<2 x s16>) = COPY $sgpr1 + %2:_(<2 x s16>) = G_SUB %0, %1 + %5:_(<2 x s16>) = G_AND %2, %2 +... 
+ +--- +name: sub_v2s16_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: sub_v2s16_sv + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY2]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]] + %0:_(<2 x s16>) = COPY $sgpr0 + %1:_(<2 x s16>) = COPY $vgpr0 + %2:_(<2 x s16>) = G_SUB %0, %1 + %5:_(<2 x s16>) = G_AND %2, %2 +... + +--- +name: sub_v2s16_vs +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr0 + ; CHECK-LABEL: name: sub_v2s16_vs + ; CHECK: liveins: $sgpr0, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]] + %0:_(<2 x s16>) = COPY $vgpr0 + %1:_(<2 x s16>) = COPY $sgpr0 + %2:_(<2 x s16>) = G_SUB %0, %1 + %5:_(<2 x s16>) = G_AND %2, %2 +... + +--- +name: sub_v2s16_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; CHECK-LABEL: name: sub_v2s16_vv + ; CHECK: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1 + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]] + %0:_(<2 x s16>) = COPY $vgpr0 + %1:_(<2 x s16>) = COPY $vgpr1 + %2:_(<2 x s16>) = G_SUB %0, %1 + %5:_(<2 x s16>) = G_AND %2, %2 +... + +--- +name: sub_s64_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0_sgpr1, $sgpr0_sgpr1 + ; CHECK-LABEL: name: sub_s64_ss + ; CHECK: liveins: $sgpr0_sgpr1, $sgpr0_sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s64) = G_SUB [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s64) = G_AND [[SUB]], [[SUB]] + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s64) = COPY $sgpr0_sgpr1 + %2:_(s64) = G_SUB %0, %1 + %4:_(s64) = G_AND %2, %2 +... + +--- +name: sub_s64_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-LABEL: name: sub_s64_sv + ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY2]], [[COPY1]] + ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) + ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] + ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s64) = COPY $vgpr0_vgpr1 + %2:_(s64) = G_SUB %0, %1 + %4:_(s64) = G_AND %2, %2 +... 
+ +--- +name: sub_s64_vs +legalized: true + +body: | + bb.0: + liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-LABEL: name: sub_s64_vs + ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY1]](s64) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) + ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] + ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s64) = COPY $sgpr0_sgpr1 + %2:_(s64) = G_SUB %0, %1 + %4:_(s64) = G_AND %2, %2 +... + +--- +name: sub_s64_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 + ; CHECK-LABEL: name: sub_s64_vv + ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3 + ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) + ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] + ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s64) = COPY $vgpr2_vgpr3 + %2:_(s64) = G_SUB %0, %1 + %4:_(s64) = G_AND %2, %2 +... + +--- +name: usubo_s32_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1 + ; CHECK-LABEL: name: usubo_s32_ss + ; CHECK: liveins: $sgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[USUBO:%[0-9]+]]:sgpr(s32), [[USUBO1:%[0-9]+]]:sgpr(s32) = G_USUBO [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[USUBO]], [[USUBO]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32), %3:_(s1) = G_USUBO %0, %1 + %5:_(s32) = G_AND %2, %2 +... + +--- +name: usubo_s32_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr1 + ; CHECK-LABEL: name: usubo_s32_sv + ; CHECK: liveins: $sgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) + ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY2]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32), %3:_(s1) = G_USUBO %0, %1 + %5:_(s32) = G_AND %2, %2 +... 
+ +--- +name: usubo_s32_vs +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $sgpr1 + ; CHECK-LABEL: name: usubo_s32_vs + ; CHECK: liveins: $vgpr0, $sgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) + ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY2]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32), %3:_(s1) = G_USUBO %0, %1 + %5:_(s32) = G_AND %2, %2 +... + +--- +name: usubo_s32_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; CHECK-LABEL: name: usubo_s32_vv + ; CHECK: liveins: $vgpr0, $vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32), %3:_(s1) = G_USUBO %0, %1 + %5:_(s32) = G_AND %2, %2 +... + +--- +name: usube_s32_ss +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $sgpr1, $sgpr2 + ; CHECK-LABEL: name: usube_s32_ss + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]] + ; CHECK-NEXT: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:sgpr(s32) = G_USUBE [[COPY]], [[COPY1]], [[AND]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[USUBE]], [[USUBE]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 + %7:_(s32) = G_AND %4, %4 +... + +--- +name: usube_s32_sv +legalized: true + +body: | + bb.0: + liveins: $sgpr0, $vgpr1, $sgpr2 + ; CHECK-LABEL: name: usube_s32_sv + ; CHECK: liveins: $sgpr0, $vgpr1, $sgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) + ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) + ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY3]], [[COPY1]], [[AMDGPU_COPY_VCC_SCC]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]] + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 + %7:_(s32) = G_AND %4, %4 +... 
+ +--- +name: usube_s32_vs +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $sgpr1, $sgpr2 + ; CHECK-LABEL: name: usube_s32_vs + ; CHECK: liveins: $vgpr0, $sgpr1, $sgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) + ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) + ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY3]], [[AMDGPU_COPY_VCC_SCC]] + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 + %7:_(s32) = G_AND %4, %4 +... + +--- +name: usube_s32_vv +legalized: true + +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK-LABEL: name: usube_s32_vv + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 + ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY2]], [[C]] + ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[AND]](s32), [[C1]] + ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]] + ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]] + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s1) = G_TRUNC %2 + %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 + %7:_(s32) = G_AND %4, %4 ... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir index 088c20a3..d4baa5f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir @@ -73,10 +73,14 @@ body: | ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]] ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) + ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C2]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s1) = G_ICMP intpred(eq), %0, %1 %3:_(s16) = G_ZEXT %2 + %4:_(s16) = G_CONSTANT i16 255 + %5:_(s16) = G_AND %3, %4 ... --- @@ -209,9 +213,13 @@ body: | ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]] ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) + ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 + ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C2]] %0:_(s32) = COPY $sgpr0 %1:_(s1) = G_TRUNC %0 %2:_(s16) = G_ZEXT %1 + %3:_(s16) = G_CONSTANT i16 255 + %4:_(s16) = G_AND %2, %3 ... 
--- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll new file mode 100644 index 0000000..8b5958d --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s + +define i16 @s_sub_i16(i16 inreg %a, i16 inreg %b) { +; GFX7-LABEL: s_sub_i16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_sub_i32 s4, s16, s17 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_sub_i16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_sub_i32 s4, s16, s17 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_sub_i16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: s_sub_i32 s4, s16, s17 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_sub_i16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_sub_i32 s4, s16, s17 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_sub_i16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_sub_i32 s0, s0, s1 +; GFX11-NEXT: v_mov_b32_e32 v0, s0 +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_sub_i16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX12-NEXT: s_wait_alu 0xfffe +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: s_setpc_b64 s[30:31] + %c = sub i16 %a, %b + ret i16 %c +} + +define i16 @v_sub_i16(i16 %a, i16 %b) { +; GFX7-LABEL: v_sub_i16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v1 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_i16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_i16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_i16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_sub_nc_u16 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_i16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_sub_nc_u16 v0.l, v0.l, v1.l +; 
GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_i16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_nc_u16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] + %c = sub i16 %a, %b + ret i16 %c +} + +define i32 @s_sub_i32(i32 inreg %a, i32 inreg %b) { +; GFX7-LABEL: s_sub_i32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_sub_i32 s4, s16, s17 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_sub_i32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_sub_i32 s4, s16, s17 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_sub_i32: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: s_sub_i32 s4, s16, s17 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_sub_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_sub_i32 s4, s16, s17 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_sub_i32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_sub_i32 s0, s0, s1 +; GFX11-NEXT: v_mov_b32_e32 v0, s0 +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_sub_i32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX12-NEXT: s_wait_alu 0xfffe +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: s_setpc_b64 s[30:31] + %c = sub i32 %a, %b + ret i32 %c +} + +define i32 @v_sub_i32(i32 %a, i32 %b) { +; GFX7-LABEL: v_sub_i32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v1 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_i32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_i32: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v1 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_i32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_sub_nc_u32_e32 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_i32: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_nc_u32_e32 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] + %c = sub i32 %a, %b + ret i32 %c +} + +; TODO: Add test for s_sub_v2i16. Instruction selector currently fails +; to handle G_UNMERGE_VALUES. 
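+; A rough sketch of the intended test, assuming the same inreg convention as
+; s_sub_i16 above; kept as a comment until the selector handles
+; G_UNMERGE_VALUES:
+;
+;   define <2 x i16> @s_sub_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+;     %c = sub <2 x i16> %a, %b
+;     ret <2 x i16> %c
+;   }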
+ +define <2 x i16> @v_sub_v2i16(<2 x i16> %a, <2 x i16> %b) { +; GFX7-LABEL: v_sub_v2i16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT: v_sub_i32_e32 v1, vcc, v1, v3 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_v2i16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_v2i16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_sub_u16_e32 v2, v0, v1 +; GFX8-NEXT: v_sub_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_v2i16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_pk_sub_i16 v0, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_v2i16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_sub_i16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_v2i16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_pk_sub_i16 v0, v0, v1 +; GFX12-NEXT: s_setpc_b64 s[30:31] + %c = sub <2 x i16> %a, %b + ret <2 x i16> %c +} + +define i64 @s_sub_i64(i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_sub_i64: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_sub_u32 s4, s16, s18 +; GFX7-NEXT: s_subb_u32 s5, s17, s19 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_sub_i64: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_sub_u32 s4, s16, s18 +; GFX9-NEXT: s_subb_u32 s5, s17, s19 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_sub_i64: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: s_sub_u32 s4, s16, s18 +; GFX8-NEXT: s_subb_u32 s5, s17, s19 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_sub_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_sub_u32 s4, s16, s18 +; GFX10-NEXT: s_subb_u32 s5, s17, s19 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: v_mov_b32_e32 v1, s5 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_sub_i64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_sub_u32 s0, s0, s2 +; GFX11-NEXT: s_subb_u32 s1, s1, s3 +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_sub_i64: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_sub_nc_u64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_wait_alu 0xfffe +; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX12-NEXT: s_setpc_b64 s[30:31] + %c = sub i64 %a, %b + ret i64 %c +} + +define i64 @v_sub_i64(i64 %a, i64 %b) { +; GFX7-LABEL: v_sub_i64: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: 
v_sub_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_i64: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2 +; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_i64: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_i64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_i64: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT: s_wait_alu 0xfffd +; GFX12-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX12-NEXT: s_setpc_b64 s[30:31] + %c = sub i64 %a, %b + ret i64 %c +} + +define void @s_usubo_usube(i64 inreg %a, i64 inreg %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) { +; GFX7-LABEL: s_usubo_usube: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_sub_u32 s4, s16, s18 +; GFX7-NEXT: s_subb_u32 s5, s17, s19 +; GFX7-NEXT: v_mov_b32_e32 v4, s4 +; GFX7-NEXT: s_mov_b32 s6, 0 +; GFX7-NEXT: s_cselect_b32 s8, 1, 0 +; GFX7-NEXT: v_mov_b32_e32 v5, s5 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b64 s[4:5], 0 +; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT: v_mov_b32_e32 v0, s8 +; GFX7-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_usubo_usube: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_sub_u32 s4, s16, s18 +; GFX9-NEXT: s_subb_u32 s5, s17, s19 +; GFX9-NEXT: v_mov_b32_e32 v4, s4 +; GFX9-NEXT: s_cselect_b32 s6, 1, 0 +; GFX9-NEXT: v_mov_b32_e32 v5, s5 +; GFX9-NEXT: global_store_dwordx2 v[0:1], v[4:5], off +; GFX9-NEXT: v_mov_b32_e32 v0, s6 +; GFX9-NEXT: global_store_dword v[2:3], v0, off +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_usubo_usube: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: s_sub_u32 s4, s16, s18 +; GFX8-NEXT: s_subb_u32 s5, s17, s19 +; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: s_cselect_b32 s6, 1, 0 +; GFX8-NEXT: v_mov_b32_e32 v5, s5 +; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v0, s6 +; GFX8-NEXT: flat_store_dword v[2:3], v0 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_usubo_usube: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_sub_u32 s4, s16, s18 +; GFX10-NEXT: s_subb_u32 s5, s17, s19 +; GFX10-NEXT: s_cselect_b32 s6, 1, 0 +; GFX10-NEXT: v_mov_b32_e32 v4, s4 +; GFX10-NEXT: v_mov_b32_e32 v5, s5 +; GFX10-NEXT: v_mov_b32_e32 v6, s6 +; 
GFX10-NEXT: global_store_dwordx2 v[0:1], v[4:5], off +; GFX10-NEXT: global_store_dword v[2:3], v6, off +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_usubo_usube: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_sub_u32 s0, s0, s2 +; GFX11-NEXT: s_subb_u32 s1, s1, s3 +; GFX11-NEXT: s_cselect_b32 s2, 1, 0 +; GFX11-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0 +; GFX11-NEXT: v_mov_b32_e32 v6, s2 +; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off +; GFX11-NEXT: global_store_b32 v[2:3], v6, off +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_usubo_usube: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_sub_co_u32 s0, s0, s2 +; GFX12-NEXT: s_sub_co_ci_u32 s1, s1, s3 +; GFX12-NEXT: s_cselect_b32 s2, 1, 0 +; GFX12-NEXT: s_wait_alu 0xfffe +; GFX12-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0 +; GFX12-NEXT: v_mov_b32_e32 v6, s2 +; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off +; GFX12-NEXT: global_store_b32 v[2:3], v6, off +; GFX12-NEXT: s_setpc_b64 s[30:31] + %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b) + %sub = extractvalue {i64, i1} %usubo, 0 + %of = extractvalue {i64, i1} %usubo, 1 + %of32 = select i1 %of, i32 1, i32 0 + store i64 %sub, ptr addrspace(1) %res + store i32 %of32, ptr addrspace(1) %carry + ret void +} + +define void @v_usubo_usube(i64 %a, i64 %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) { +; GFX7-LABEL: v_usubo_usube: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX7-NEXT: s_mov_b32 s6, 0 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_mov_b64 s[4:5], 0 +; GFX7-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX7-NEXT: buffer_store_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; GFX7-NEXT: buffer_store_dword v2, v[6:7], s[4:7], 0 addr64 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_usubo_usube: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2 +; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX9-NEXT: global_store_dwordx2 v[4:5], v[0:1], off +; GFX9-NEXT: global_store_dword v[6:7], v2, off +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_usubo_usube: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1] +; GFX8-NEXT: flat_store_dword v[6:7], v2 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_usubo_usube: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX10-NEXT: global_store_dwordx2 v[4:5], v[0:1], off +; GFX10-NEXT: global_store_dword v[6:7], v2, off +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_usubo_usube: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT: v_sub_co_ci_u32_e32 
v1, vcc_lo, v1, v3, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX11-NEXT: global_store_b64 v[4:5], v[0:1], off +; GFX11-NEXT: global_store_b32 v[6:7], v2, off +; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_usubo_usube: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_expcnt 0x0 +; GFX12-NEXT: s_wait_samplecnt 0x0 +; GFX12-NEXT: s_wait_bvhcnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT: s_wait_alu 0xfffd +; GFX12-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX12-NEXT: s_wait_alu 0xfffd +; GFX12-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX12-NEXT: global_store_b64 v[4:5], v[0:1], off +; GFX12-NEXT: global_store_b32 v[6:7], v2, off +; GFX12-NEXT: s_setpc_b64 s[30:31] + %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b) + %sub = extractvalue {i64, i1} %usubo, 0 + %of = extractvalue {i64, i1} %usubo, 1 + %of32 = select i1 %of, i32 1, i32 0 + store i64 %sub, ptr addrspace(1) %res + store i32 %of32, ptr addrspace(1) %carry + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll new file mode 100644 index 0000000..34d4c51 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll @@ -0,0 +1,173 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -o - %s | FileCheck %s +define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readfirstlane_with_readfirstlane: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5) + %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1) + store i32 %v2, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readfirstlane_with_readlane: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_bfe_u32 v1, v0, 10, 10 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_readfirstlane_b32 s2, v1 +; CHECK-NEXT: v_readlane_b32 s2, v0, s2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %tidx = call i32 @llvm.amdgcn.workitem.id.x() + %tidy = call i32 @llvm.amdgcn.workitem.id.y() + %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy) + %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1) + store i32 %v2, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readlane_with_firstlane: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_readfirstlane_b32 s2, v0 +; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %tidx = call 
i32 @llvm.amdgcn.workitem.id.x() + %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx) + %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3) + store i32 %v2, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readlane_readlane: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_bfe_u32 v1, v0, 10, 10 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_readfirstlane_b32 s2, v1 +; CHECK-NEXT: v_readlane_b32 s2, v0, s2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %tidx = call i32 @llvm.amdgcn.workitem.id.x() + %tidy = call i32 @llvm.amdgcn.workitem.id.y() + %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy) + %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2) + store i32 %v2, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) { +; CHECK-LABEL: permlane64_uniform: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_clause 0x1 +; CHECK-NEXT: s_load_b32 s2, s[4:5], 0x8 +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %v = call i32 @llvm.amdgcn.permlane64(i32 %src) + store i32 %v, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) { +; CHECK-LABEL: permlane64_nonuniform: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT: v_permlane64_b32 v1, v0 +; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %v = call i32 @llvm.amdgcn.permlane64(i32 %tid) + %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid + store i32 %v, i32 addrspace(1)* %out_ptr + ret void +} + +define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) { +; CHECK-LABEL: permlane64_nonuniform_expression: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; CHECK-NEXT: v_add_nc_u32_e32 v1, 1, v0 +; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; CHECK-NEXT: v_permlane64_b32 v1, v1 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %tid2 = add i32 %tid, 1 + %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2) + %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid + store i32 %v, i32 addrspace(1)* %out_ptr + ret void +} + +define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) { +; CHECK-LABEL: trivial_waterfall_eq_zero: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5 +; CHECK-NEXT: s_mov_b32 s2, 0 +; CHECK-NEXT: s_branch .LBB7_2 +; CHECK-NEXT: .LBB7_1: ; %Flow +; CHECK-NEXT: ; in Loop: Header=BB7_2 Depth=1 +; CHECK-NEXT: 
s_and_not1_b32 vcc_lo, exec_lo, s2 +; CHECK-NEXT: s_mov_b32 s2, -1 +; CHECK-NEXT: s_cbranch_vccz .LBB7_4 +; CHECK-NEXT: .LBB7_2: ; %while +; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s2 +; CHECK-NEXT: s_mov_b32 s2, -1 +; CHECK-NEXT: s_cbranch_vccnz .LBB7_1 +; CHECK-NEXT: ; %bb.3: ; %if +; CHECK-NEXT: ; in Loop: Header=BB7_2 Depth=1 +; CHECK-NEXT: s_mov_b32 s2, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT: s_branch .LBB7_1 +; CHECK-NEXT: .LBB7_4: ; %exit +; CHECK-NEXT: s_endpgm +entry: + br label %while + +while: + %done = phi i1 [ 0, %entry ], [ 1, %if ] + %not_done = xor i1 %done, true + %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done) + %is_done = icmp eq i64 %ballot, 0 ; in this case is_done = !not_done + br i1 %is_done, label %exit, label %if + +if: + store i32 5, ptr addrspace(1) %out + br label %while + +exit: + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll index 33ce278..c962c05 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK +; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -O3 -S < %s | FileCheck %s -check-prefix=O3-CHECK define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) { diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll index a3e42e5..a7e828c 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK +; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s -check-prefix=DCE-CHECK diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll index 2fde3e3..7929261 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK +; RUN: opt 
-mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=COMB-CHECK ; This should not be optimized diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll index db32135..b8f084d 100644 --- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll +++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll @@ -4,24 +4,14 @@ define amdgpu_gs i32 @main() { ; CHECK-LABEL: main: ; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_bitcmp1_b32 0, 0 ; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_cselect_b32 s1, -1, 0 -; CHECK-NEXT: s_or_saveexec_b32 s2, -1 -; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1 -; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) -; CHECK-NEXT: v_readfirstlane_b32 s1, v0 -; CHECK-NEXT: s_mov_b32 exec_lo, s2 -; CHECK-NEXT: s_or_b32 s0, s0, s1 -; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; CHECK-NEXT: s_bitcmp1_b32 s0, 0 ; CHECK-NEXT: s_cselect_b32 s0, -1, 0 -; CHECK-NEXT: s_wait_alu 0xfffe ; CHECK-NEXT: s_xor_b32 s0, s0, -1 -; CHECK-NEXT: s_wait_alu 0xfffe -; CHECK-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0 -; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) -; CHECK-NEXT: v_readfirstlane_b32 s0, v1 +; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 +; CHECK-NEXT: v_readfirstlane_b32 s0, v0 ; CHECK-NEXT: s_wait_alu 0xf1ff ; CHECK-NEXT: ; return to shader part epilog bb: diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll index 3aa3663..704ea37 100644 --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll @@ -9,11 +9,11 @@ ; RUN: | FileCheck -check-prefix=GCN-O3 %s -; GCN-O0: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O2: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O2: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt,amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O3: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O3: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt,amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) define void @empty() { ret void diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll 
b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll index 6e52125..ee6caab 100644 --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll @@ -31,6 +31,11 @@ ; GCN-O0-NEXT: AMDGPU Remove Incompatible Functions ; GCN-O0-NEXT: AMDGPU Printf lowering ; GCN-O0-NEXT: Lower ctors and dtors for AMDGPU +; GCN-O0-NEXT: FunctionPass Manager +; GCN-O0-NEXT: Dominator Tree Construction +; GCN-O0-NEXT: Cycle Info Analysis +; GCN-O0-NEXT: Uniformity Analysis +; GCN-O0-NEXT: AMDGPU Uniform Intrinsic Combine ; GCN-O0-NEXT: Expand variadic functions ; GCN-O0-NEXT: AMDGPU Inline All Functions ; GCN-O0-NEXT: Inliner for always_inline functions @@ -179,6 +184,11 @@ ; GCN-O1-NEXT: AMDGPU Remove Incompatible Functions ; GCN-O1-NEXT: AMDGPU Printf lowering ; GCN-O1-NEXT: Lower ctors and dtors for AMDGPU +; GCN-O1-NEXT: FunctionPass Manager +; GCN-O1-NEXT: Dominator Tree Construction +; GCN-O1-NEXT: Cycle Info Analysis +; GCN-O1-NEXT: Uniformity Analysis +; GCN-O1-NEXT: AMDGPU Uniform Intrinsic Combine ; GCN-O1-NEXT: Expand variadic functions ; GCN-O1-NEXT: AMDGPU Inline All Functions ; GCN-O1-NEXT: Inliner for always_inline functions @@ -466,6 +476,11 @@ ; GCN-O1-OPTS-NEXT: AMDGPU Remove Incompatible Functions ; GCN-O1-OPTS-NEXT: AMDGPU Printf lowering ; GCN-O1-OPTS-NEXT: Lower ctors and dtors for AMDGPU +; GCN-O1-OPTS-NEXT: FunctionPass Manager +; GCN-O1-OPTS-NEXT: Dominator Tree Construction +; GCN-O1-OPTS-NEXT: Cycle Info Analysis +; GCN-O1-OPTS-NEXT: Uniformity Analysis +; GCN-O1-OPTS-NEXT: AMDGPU Uniform Intrinsic Combine ; GCN-O1-OPTS-NEXT: Expand variadic functions ; GCN-O1-OPTS-NEXT: AMDGPU Inline All Functions ; GCN-O1-OPTS-NEXT: Inliner for always_inline functions @@ -783,6 +798,10 @@ ; GCN-O2-NEXT: Lower ctors and dtors for AMDGPU ; GCN-O2-NEXT: FunctionPass Manager ; GCN-O2-NEXT: AMDGPU Image Intrinsic Optimizer +; GCN-O2-NEXT: Dominator Tree Construction +; GCN-O2-NEXT: Cycle Info Analysis +; GCN-O2-NEXT: Uniformity Analysis +; GCN-O2-NEXT: AMDGPU Uniform Intrinsic Combine ; GCN-O2-NEXT: Expand variadic functions ; GCN-O2-NEXT: AMDGPU Inline All Functions ; GCN-O2-NEXT: Inliner for always_inline functions @@ -1104,6 +1123,10 @@ ; GCN-O3-NEXT: Lower ctors and dtors for AMDGPU ; GCN-O3-NEXT: FunctionPass Manager ; GCN-O3-NEXT: AMDGPU Image Intrinsic Optimizer +; GCN-O3-NEXT: Dominator Tree Construction +; GCN-O3-NEXT: Cycle Info Analysis +; GCN-O3-NEXT: Uniformity Analysis +; GCN-O3-NEXT: AMDGPU Uniform Intrinsic Combine ; GCN-O3-NEXT: Expand variadic functions ; GCN-O3-NEXT: AMDGPU Inline All Functions ; GCN-O3-NEXT: Inliner for always_inline functions diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll index e00e1f1..c1f3a12 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll @@ -110,9 +110,8 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_and_b32 s0, s0, 1 -; CHECK-NEXT: v_cmp_ne_u32_e64 vcc_lo, s0, 0 -; CHECK-NEXT: s_cbranch_vccz .LBB8_2 +; CHECK-NEXT: s_bitcmp0_b32 s0, 0 +; CHECK-NEXT: s_cbranch_scc1 .LBB8_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB8_3 @@ -156,15 +155,16 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_and_b32 
s0, s0, 1 -; CHECK-NEXT: v_cmp_ne_u32_e64 vcc_lo, s0, 0 -; CHECK-NEXT: s_cbranch_vccz .LBB10_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB10_3 -; CHECK-NEXT: .LBB10_2: ; %true +; CHECK-NEXT: s_bitcmp1_b32 s0, 0 +; CHECK-NEXT: s_cselect_b32 s0, -1, 0 +; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s0 +; CHECK-NEXT: s_cbranch_vccnz .LBB10_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB10_3 +; CHECK-NEXT: .LBB10_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB10_3 ; CHECK-NEXT: .LBB10_3: %c = trunc i32 %v to i1 %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -201,8 +201,8 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: v_cmp_lt_u32_e64 vcc_lo, s0, 12 -; CHECK-NEXT: s_cbranch_vccz .LBB12_2 +; CHECK-NEXT: s_cmp_gt_u32 s0, 11 +; CHECK-NEXT: s_cbranch_scc1 .LBB12_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB12_3 @@ -245,14 +245,14 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) { ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare: ; CHECK: ; %bb.0: -; CHECK-NEXT: v_cmp_lt_u32_e64 vcc_lo, s0, 12 -; CHECK-NEXT: s_cbranch_vccz .LBB14_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB14_3 -; CHECK-NEXT: .LBB14_2: ; %true +; CHECK-NEXT: s_cmp_lt_u32 s0, 12 +; CHECK-NEXT: s_cbranch_scc1 .LBB14_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB14_3 +; CHECK-NEXT: .LBB14_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB14_3 ; CHECK-NEXT: .LBB14_3: %c = icmp ult i32 %v, 12 %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -293,13 +293,13 @@ false: define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) { ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_cmp_lt_u32 s0, 12 +; CHECK-NEXT: s_cmp_gt_u32 s0, 11 ; CHECK-NEXT: s_cselect_b32 s0, -1, 0 -; CHECK-NEXT: s_cmp_gt_u32 s1, 34 +; CHECK-NEXT: s_cmp_lt_u32 s1, 35 ; CHECK-NEXT: s_cselect_b32 s1, -1, 0 -; CHECK-NEXT: s_and_b32 s0, s0, s1 -; CHECK-NEXT: s_and_b32 s0, s0, exec_lo -; CHECK-NEXT: s_cbranch_scc0 .LBB16_2 +; CHECK-NEXT: s_or_b32 s0, s0, s1 +; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s0 +; CHECK-NEXT: s_cbranch_vccnz .LBB16_2 ; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB16_3 @@ -353,14 +353,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg ; CHECK-NEXT: s_cmp_gt_u32 s1, 34 ; CHECK-NEXT: s_cselect_b32 s1, -1, 0 ; CHECK-NEXT: s_and_b32 s0, s0, s1 -; CHECK-NEXT: s_and_b32 s0, s0, exec_lo -; CHECK-NEXT: s_cbranch_scc0 .LBB18_2 -; CHECK-NEXT: ; %bb.1: ; %false -; CHECK-NEXT: s_mov_b32 s0, 33 -; CHECK-NEXT: s_branch .LBB18_3 -; CHECK-NEXT: .LBB18_2: ; %true +; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s0 +; CHECK-NEXT: s_cbranch_vccnz .LBB18_2 +; CHECK-NEXT: ; %bb.1: ; %true ; CHECK-NEXT: s_mov_b32 s0, 42 ; CHECK-NEXT: s_branch .LBB18_3 +; CHECK-NEXT: .LBB18_2: ; %false +; CHECK-NEXT: s_mov_b32 s0, 33 +; CHECK-NEXT: s_branch .LBB18_3 ; CHECK-NEXT: .LBB18_3: %v1c = icmp ult i32 %v1, 12 %v2c = icmp ugt i32 %v2, 34 @@ -591,3 +591,24 @@ exit: store i32 %ballot, ptr addrspace(1) %out ret void } + +define amdgpu_cs i32 @compare_bfloats(bfloat %x, bfloat %y) { +; GFX10-LABEL: compare_bfloats: 
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX10-NEXT: v_cmp_gt_f32_e64 s0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: compare_bfloats:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b16_e32 v2.l, 0
+; GFX11-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v1, v2
+; GFX11-NEXT: ; return to shader part epilog
+ %cmp = fcmp ogt bfloat %x, %y
+ %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %cmp)
+ ret i32 %ballot
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
index b4adf7f..827a01f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
@@ -113,9 +113,8 @@ false:
 define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {
 ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, s0, 1
-; CHECK-NEXT: v_cmp_ne_u32_e64 vcc, s0, 0
-; CHECK-NEXT: s_cbranch_vccz .LBB8_2
+; CHECK-NEXT: s_bitcmp0_b32 s0, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB8_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
 ; CHECK-NEXT: s_branch .LBB8_3
@@ -159,15 +158,16 @@ false:
 define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {
 ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, s0, 1
-; CHECK-NEXT: v_cmp_ne_u32_e64 vcc, s0, 0
-; CHECK-NEXT: s_cbranch_vccz .LBB10_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB10_3
-; CHECK-NEXT: .LBB10_2: ; %true
+; CHECK-NEXT: s_bitcmp1_b32 s0, 0
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: s_and_b64 vcc, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_vccnz .LBB10_2
+; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
 ; CHECK-NEXT: s_branch .LBB10_3
+; CHECK-NEXT: .LBB10_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB10_3
 ; CHECK-NEXT: .LBB10_3:
 %c = trunc i32 %v to i1
 %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
@@ -204,8 +204,8 @@ false:
 define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
 ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_lt_u32_e64 vcc, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB12_2
+; CHECK-NEXT: s_cmp_gt_u32 s0, 11
+; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
 ; CHECK-NEXT: s_branch .LBB12_3
@@ -248,14 +248,14 @@ false:
 define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
 ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_lt_u32_e64 vcc, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB14_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_2: ; %true
+; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cbranch_scc1 .LBB14_2
+; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
 ; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB14_3
 ; CHECK-NEXT: .LBB14_3:
 %c = icmp ult i32 %v, 12
 %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
@@ -296,13 +296,13 @@ false:
 define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
 ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cmp_gt_u32 s0, 11
 ; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT: s_cmp_gt_u32 s1, 34
+; CHECK-NEXT: s_cmp_lt_u32 s1, 35
 ; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
-; CHECK-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
-; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], exec
-; CHECK-NEXT: s_cbranch_scc0 .LBB16_2
+; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; CHECK-NEXT: s_and_b64 vcc, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_vccnz .LBB16_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
 ; CHECK-NEXT: s_branch .LBB16_3
@@ -356,14 +356,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg
 ; CHECK-NEXT: s_cmp_gt_u32 s1, 34
 ; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
 ; CHECK-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
-; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], exec
-; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_2: ; %true
+; CHECK-NEXT: s_and_b64 vcc, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_vccnz .LBB18_2
+; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
 ; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB18_3
 ; CHECK-NEXT: .LBB18_3:
 %v1c = icmp ult i32 %v1, 12
 %v2c = icmp ugt i32 %v2, 34
@@ -557,3 +557,15 @@ exit:
 store i64 %ballot, ptr addrspace(1) %out
 ret void
 }
+
+define amdgpu_cs i64 @compare_bfloats(bfloat %x, bfloat %y) {
+; CHECK-LABEL: compare_bfloats:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; CHECK-NEXT: v_cmp_gt_f32_e64 s[0:1], v0, v1
+; CHECK-NEXT: ; return to shader part epilog
+ %cmp = fcmp ogt bfloat %x, %y
+ %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
+ ret i64 %ballot
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll
index 6dd2258..39191d2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll
@@ -23,10 +23,8 @@ define amdgpu_kernel void @test_s_i32(ptr addrspace(1) %out, i32 %src0) {
 ; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 ;
 ; GFX11-GISEL-LABEL: test_s_i32:
@@ -36,8 +34,6 @@ define amdgpu_kernel void @test_s_i32(ptr addrspace(1) %out, i32 %src0) {
 ; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
 ; GFX11-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
 ; GFX11-GISEL-NEXT: s_endpgm
 %v = call i32 @llvm.amdgcn.permlane64.i32(i32 %src0)
@@ -50,12 +46,9 @@ define amdgpu_kernel void @test_s_i64(ptr addrspace(1) %out, i64 %src0) {
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
-; GFX11-SDAG-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 ;
 ; GFX11-GISEL-LABEL: test_s_i64:
@@ -64,9 +57,6 @@ define amdgpu_kernel void @test_s_i64(ptr addrspace(1) %out, i64 %src0) {
 ; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, 0
 ; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v1
 ; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-GISEL-NEXT: s_endpgm
 %v = call i64 @llvm.amdgcn.permlane64.i64(i64 %src0)
@@ -79,12 +69,9 @@ define amdgpu_kernel void @test_s_f64(ptr addrspace(1) %out, double %src0) {
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
-; GFX11-SDAG-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 ;
 ; GFX11-GISEL-LABEL: test_s_f64:
@@ -93,9 +80,6 @@ define amdgpu_kernel void @test_s_f64(ptr addrspace(1) %out, double %src0) {
 ; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, 0
 ; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v1
 ; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-GISEL-NEXT: s_endpgm
 %v = call double @llvm.amdgcn.permlane64.f64(double %src0)
@@ -116,19 +100,15 @@ define amdgpu_kernel void @test_i_i32(ptr addrspace(1) %out) {
 ; GFX11-SDAG-LABEL: test_i_i32:
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0x63 :: v_dual_mov_b32 v1, 0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x63
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 ;
 ; GFX11-GISEL-LABEL: test_i_i32:
 ; GFX11-GISEL: ; %bb.0:
 ; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, 0x63 :: v_dual_mov_b32 v1, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
 ; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
 ; GFX11-GISEL-NEXT: s_endpgm
@@ -141,19 +121,15 @@ define amdgpu_kernel void @test_i_f32(ptr addrspace(1) %out) {
 ; GFX11-SDAG-LABEL: test_i_f32:
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0x449a5000 :: v_dual_mov_b32 v1, 0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x449a5000
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 ;
 ; GFX11-GISEL-LABEL: test_i_f32:
 ; GFX11-GISEL: ; %bb.0:
 ; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, 0x449a5000 :: v_dual_mov_b32 v1, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
 ; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
 ; GFX11-GISEL-NEXT: s_endpgm
@@ -166,23 +142,16 @@ define amdgpu_kernel void @test_i_i64(ptr addrspace(1) %out) {
 ; GFX11-SDAG-LABEL: test_i_i64:
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0x63
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v2
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, 0x63
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: global_store_b64 v1, v[0:1], s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 ;
 ; GFX11-GISEL-LABEL: test_i_i64:
 ; GFX11-GISEL: ; %bb.0:
 ; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0x63
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v2
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
 ; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-GISEL-NEXT: s_endpgm
@@ -195,22 +164,16 @@ define amdgpu_kernel void @test_i_f64(ptr addrspace(1) %out) {
 ; GFX11-SDAG-LABEL: test_i_f64:
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0x40934a00
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x40934a00
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: global_store_b64 v0, v[0:1], s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 ;
 ; GFX11-GISEL-LABEL: test_i_f64:
 ; GFX11-GISEL: ; %bb.0:
 ; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, 0x40934a00
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v2
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v1
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0x40934a00 :: v_dual_mov_b32 v2, 0
 ; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-GISEL-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll
index b0149f7..672b658 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll
@@ -6,12 +6,9 @@ define amdgpu_kernel void @test_p0(ptr addrspace(1) %out, ptr %src0) {
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
-; GFX11-SDAG-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 %v = call ptr @llvm.amdgcn.permlane64.p0(ptr %src0)
 store ptr %v, ptr addrspace(1) %out
@@ -22,21 +19,14 @@ define amdgpu_kernel void @test_v3p0(ptr addrspace(1) %out, <3 x ptr> %src0) {
 ; GFX11-SDAG-LABEL: test_v3p0:
 ; GFX11-SDAG: ; %bb.0:
 ; GFX11-SDAG-NEXT: s_clause 0x2
-; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x44
 ; GFX11-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x54
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x44
 ; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v1, s2
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v5, s7
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v8, s6
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v7, s0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v1
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v4
-; GFX11-SDAG-NEXT: v_permlane64_b32 v5, v5
-; GFX11-SDAG-NEXT: v_permlane64_b32 v4, v8
-; GFX11-SDAG-NEXT: v_permlane64_b32 v3, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v7
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v5, s7
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v1, s1
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
 ; GFX11-SDAG-NEXT: s_clause 0x1
 ; GFX11-SDAG-NEXT: global_store_b64 v6, v[4:5], s[4:5] offset:16
 ; GFX11-SDAG-NEXT: global_store_b128 v6, v[0:3], s[4:5]
@@ -53,10 +43,8 @@ define amdgpu_kernel void @test_p3(ptr addrspace(1) %out, ptr addrspace(3) %src0
 ; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 %v = call ptr addrspace(3) @llvm.amdgcn.permlane64.v3p0(ptr addrspace(3) %src0)
 store ptr addrspace(3) %v, ptr addrspace(1) %out
@@ -70,14 +58,9 @@ define amdgpu_kernel void @test_v3p3(ptr addrspace(1) %out, <3 x ptr addrspace(3
 ; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
 ; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v3
-; GFX11-SDAG-NEXT: global_store_b96 v4, v[0:2], s[4:5]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-SDAG-NEXT: global_store_b96 v3, v[0:2], s[4:5]
 ; GFX11-SDAG-NEXT: s_endpgm
 %v = call <3 x ptr addrspace(3)> @llvm.amdgcn.permlane64.v3p3(<3 x ptr addrspace(3)> %src0)
 store <3 x ptr addrspace(3)> %v, ptr addrspace(1) %out
@@ -91,10 +74,8 @@ define amdgpu_kernel void @test_p5(ptr addrspace(1) %out, ptr addrspace(5) %src0
 ; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 %v = call ptr addrspace(5) @llvm.amdgcn.permlane64.p5(ptr addrspace(5) %src0)
 store ptr addrspace(5) %v, ptr addrspace(1) %out
@@ -108,14 +89,9 @@ define amdgpu_kernel void @test_v3p5(ptr addrspace(1) %out, <3 x ptr addrspace(5
 ; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
 ; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v3
-; GFX11-SDAG-NEXT: global_store_b96 v4, v[0:2], s[4:5]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-SDAG-NEXT: global_store_b96 v3, v[0:2], s[4:5]
 ; GFX11-SDAG-NEXT: s_endpgm
 %v = call <3 x ptr addrspace(5)> @llvm.amdgcn.permlane64.v3p5(<3 x ptr addrspace(5)> %src0)
 store <3 x ptr addrspace(5)> %v, ptr addrspace(1) %out
@@ -129,10 +105,8 @@ define amdgpu_kernel void @test_p6(ptr addrspace(1) %out, ptr addrspace(6) %src0
 ; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
 ; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
 ; GFX11-SDAG-NEXT: s_endpgm
 %v = call ptr addrspace(6) @llvm.amdgcn.permlane64.p6(ptr addrspace(6) %src0)
 store ptr addrspace(6) %v, ptr addrspace(1) %out
@@ -146,14 +120,9 @@ define amdgpu_kernel void @test_v3p6(ptr addrspace(1) %out, <3 x ptr addrspace(6
 ; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
 ; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
 ; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v3
-; GFX11-SDAG-NEXT: global_store_b96 v4, v[0:2], s[4:5]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-SDAG-NEXT: global_store_b96 v3, v[0:2], s[4:5]
 ; GFX11-SDAG-NEXT: s_endpgm
 %v = call <3 x ptr addrspace(6)> @llvm.amdgcn.permlane64.v3p6(<3 x ptr addrspace(6)> %src0)
 store <3 x ptr addrspace(6)> %v, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
index d1ba892..02d2990 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -396,8 +396,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) {
 ;
 ; CHECK-GISEL-LABEL: test_readfirstlane_imm_f64:
 ; CHECK-GISEL: ; %bb.0:
-; CHECK-GISEL-NEXT: s_mov_b32 s0, 0
-; CHECK-GISEL-NEXT: s_mov_b32 s1, 0x40400000
+; CHECK-GISEL-NEXT: s_mov_b64 s[0:1], 0x4040000000000000
 ; CHECK-GISEL-NEXT: ;;#ASMSTART
 ; CHECK-GISEL-NEXT: ; use s[0:1]
 ; CHECK-GISEL-NEXT: ;;#ASMEND
@@ -456,14 +455,13 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out
 ; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_i64:
 ; CHECK-GISEL: ; %bb.0:
 ; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b64 s[2:3], 32
 ; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 32
 ; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
 ; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0
 ; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
 ; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; CHECK-GISEL-NEXT: s_endpgm
@@ -490,15 +488,13 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out
 ; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_f64:
 ; CHECK-GISEL: ; %bb.0:
 ; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b32 s2, 0
 ; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: s_mov_b32 s3, 0x40400000
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 0
 ; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
 ; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0x40400000
 ; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
 ; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; CHECK-GISEL-NEXT: s_endpgm
@@ -588,17 +584,17 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1
 ; CHECK-SDAG: ; %bb.0:
 ; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
 ; CHECK-SDAG-NEXT: ;;#ASMSTART
 ; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
 ; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
 ; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
 ; CHECK-SDAG-NEXT: s_endpgm
 ;
 ; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_i64:
@@ -628,17 +624,17 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1
 ; CHECK-SDAG: ; %bb.0:
 ; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
 ; CHECK-SDAG-NEXT: ;;#ASMSTART
 ; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
 ; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
 ; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
 ; CHECK-SDAG-NEXT: s_endpgm
 ;
 ; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_f64:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
index 7ff5eb4..0795f40 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
@@ -9,7 +9,7 @@ declare double @llvm.amdgcn.readlane.f64(double, i32) #0
 define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1 {
 ; CHECK-SDAG-LABEL: test_readlane_sreg_sreg_i32:
 ; CHECK-SDAG: ; %bb.0:
-; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-SDAG-NEXT: s_load_dword s0, s[8:9], 0x0
 ; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
 ; CHECK-SDAG-NEXT: ;;#ASMSTART
 ; CHECK-SDAG-NEXT: ; use s0
@@ -18,7 +18,7 @@ define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1
 ;
 ; CHECK-GISEL-LABEL: test_readlane_sreg_sreg_i32:
 ; CHECK-GISEL: ; %bb.0:
-; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-GISEL-NEXT: s_load_dword s0, s[8:9], 0x0
 ; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; CHECK-GISEL-NEXT: ;;#ASMSTART
 ; CHECK-GISEL-NEXT: ; use s0
@@ -224,14 +224,13 @@ define amdgpu_kernel void @test_readlane_imm_sreg_i64(ptr addrspace(1) %out, i32
 ; CHECK-GISEL-LABEL: test_readlane_imm_sreg_i64:
 ; CHECK-GISEL: ; %bb.0:
 ; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b64 s[2:3], 32
 ; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 32
 ; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
 ; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
 ; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0
 ; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
 ; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; CHECK-GISEL-NEXT: s_endpgm
@@ -258,15 +257,13 @@ define amdgpu_kernel void @test_readlane_imm_sreg_f64(ptr addrspace(1) %out, i32
 ; CHECK-GISEL-LABEL: test_readlane_imm_sreg_f64:
 ; CHECK-GISEL: ; %bb.0:
 ; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b32 s2, 0
 ; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: s_mov_b32 s3, 0x40400000
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 0
 ; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
 ; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0x40400000
 ; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
 ; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; CHECK-GISEL-NEXT: s_endpgm
@@ -660,17 +657,17 @@ define amdgpu_kernel void @test_readlane_copy_from_sgpr_i64(ptr addrspace(1) %ou
 ; CHECK-SDAG: ; %bb.0:
 ; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
 ; CHECK-SDAG-NEXT: ;;#ASMSTART
 ; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
 ; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
 ; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
 ; CHECK-SDAG-NEXT: s_endpgm
 ;
 ; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_i64:
@@ -700,17 +697,17 @@ define amdgpu_kernel void @test_readlane_copy_from_sgpr_f64(ptr addrspace(1) %ou
 ; CHECK-SDAG: ; %bb.0:
 ; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
 ; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
 ; CHECK-SDAG-NEXT: ;;#ASMSTART
 ; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
 ; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
 ; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
 ; CHECK-SDAG-NEXT: s_endpgm
 ;
 ; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_f64:
diff --git a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll
index c573253..48ed5c4 100644
--- a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll
+++ b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll
@@ -73,10 +73,10 @@ define amdgpu_kernel void @constant_zextload_v64i16_to_v64i32(ptr addrspace(1) %
 }
 
 ; CHECK-LABEL: {{^}}excess_soft_clause_reg_pressure:
-; GFX908: NumSgprs: 64
-; GFX908-GCNTRACKERS: NumSgprs: 64
+; GFX908: NumSgprs: 56
+; GFX908-GCNTRACKERS: NumSgprs: 56
 ; GFX908: NumVgprs: 43
-; GFX908-GCNTRACKERS: NumVgprs: 39
+; GFX908-GCNTRACKERS: NumVgprs: 40
 ; GFX908: Occupancy: 5
 ; GFX908-GCNTRACKERS: Occupancy: 6
diff --git a/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll b/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll
index 586579f..ef96944 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll
@@ -20,38 +20,33 @@ define void @test() {
 ; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT: .LBB0_3: ; %bb.3
 ; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: ; implicit-def: $sgpr4
-; CHECK-NEXT: v_mov_b32_e32 v0, s4
-; CHECK-NEXT: v_readfirstlane_b32 s6, v0
 ; CHECK-NEXT: s_mov_b64 s[4:5], -1
-; CHECK-NEXT: s_mov_b32 s7, 0
-; CHECK-NEXT: s_cmp_eq_u32 s6, s7
 ; CHECK-NEXT: ; implicit-def: $vgpr1 : SGPR spill to VGPR lane
 ; CHECK-NEXT: v_writelane_b32 v1, s4, 0
 ; CHECK-NEXT: v_writelane_b32 v1, s5, 1
-; CHECK-NEXT: s_mov_b64 s[10:11], exec
-; CHECK-NEXT: s_mov_b64 exec, -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
+; CHECK-NEXT: s_nop 0
 ; CHECK-NEXT: v_accvgpr_write_b32 a0, v1 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT: s_cbranch_scc1 .LBB0_5
 ; CHECK-NEXT: ; %bb.4: ; %bb.4
 ; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
 ; CHECK-NEXT: v_accvgpr_read_b32 v1, a0 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT: s_mov_b64 s[4:5], 0
 ; CHECK-NEXT: v_writelane_b32 v1, s4, 0
 ; CHECK-NEXT: v_writelane_b32 v1, s5, 1
-; CHECK-NEXT: s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
 ; CHECK-NEXT: s_nop 0
 ; CHECK-NEXT: v_accvgpr_write_b32 a0, v1 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT: .LBB0_5: ; %Flow
 ; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
 ; CHECK-NEXT: s_nop 0
 ; CHECK-NEXT: v_accvgpr_read_b32 v1, a0 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT: v_readlane_b32 s4, v1, 0
 ; CHECK-NEXT: v_readlane_b32 s5, v1, 1
 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/spill_kill_v16.mir b/llvm/test/CodeGen/AMDGPU/spill_kill_v16.mir
index 0c694d9..6989583 100644
--- a/llvm/test/CodeGen/AMDGPU/spill_kill_v16.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill_kill_v16.mir
@@ -1,5 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -march=amdgcn -verify-machineinstrs -mcpu=gfx1100 -mattr=+real-true16 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=EXPANDED %s
+# RUN: llc -march=amdgcn -verify-machineinstrs -mcpu=gfx1250 -mattr=+real-true16 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=SRAMECC-EXPANDED %s
 ---
 name: spill_restore_vgpr16
@@ -31,6 +32,28 @@ body: |
 ; EXPANDED-NEXT: $vgpr0_lo16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, align 4, addrspace 5)
 ; EXPANDED-NEXT: $vgpr0_hi16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, align 4, addrspace 5)
 ; EXPANDED-NEXT: S_NOP 0, implicit killed renamable $vgpr0_lo16, implicit killed renamable $vgpr0_hi16
+ ;
+ ; SRAMECC-EXPANDED-LABEL: name: spill_restore_vgpr16
+ ; SRAMECC-EXPANDED: bb.0:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.1(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit-def renamable $vgpr0_lo16, implicit-def renamable $vgpr0_hi16
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_hi16, $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.1, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit renamable $vgpr0_lo16
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_lo16, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.0, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: S_CBRANCH_SCC1 %bb.1, implicit undef $scc
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.1:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.2(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 1
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.2:
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit killed renamable $vgpr0_lo16, implicit killed renamable $vgpr0_hi16
 bb.0:
 successors: %bb.1(0x80000000)
 S_NOP 0, implicit-def renamable $vgpr0_lo16, implicit-def renamable $vgpr0_hi16
@@ -78,6 +101,29 @@ body: |
 ; EXPANDED-NEXT: $vgpr0_lo16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, align 4, addrspace 5)
 ; EXPANDED-NEXT: $vgpr0_hi16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, align 4, addrspace 5)
 ; EXPANDED-NEXT: S_NOP 0, implicit killed renamable $vgpr0_lo16, implicit killed renamable $vgpr0_hi16
+ ;
+ ; SRAMECC-EXPANDED-LABEL: name: spill_restore_vgpr16_middle_of_block
+ ; SRAMECC-EXPANDED: bb.0:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.1(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit-def renamable $vgpr0_lo16, implicit-def renamable $vgpr0_hi16
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_hi16, $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.1, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit renamable $vgpr0_lo16
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_lo16, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.0, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: S_CBRANCH_SCC1 %bb.1, implicit undef $scc
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.1:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.2(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 1
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.2:
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 1
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit killed renamable $vgpr0_lo16, implicit killed renamable $vgpr0_hi16
 bb.0:
 successors: %bb.1(0x80000000)
 S_NOP 0, implicit-def renamable $vgpr0_lo16, implicit-def renamable $vgpr0_hi16
@@ -124,6 +170,27 @@ body: |
 ; EXPANDED-NEXT: bb.2:
 ; EXPANDED-NEXT: $vgpr0_lo16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, align 4, addrspace 5)
 ; EXPANDED-NEXT: $vgpr0_hi16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, align 4, addrspace 5)
+ ;
+ ; SRAMECC-EXPANDED-LABEL: name: spill_restore_vgpr16_end_of_block
+ ; SRAMECC-EXPANDED: bb.0:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.1(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit-def renamable $vgpr0_lo16, implicit-def renamable $vgpr0_hi16
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_hi16, $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.1, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit renamable $vgpr0_lo16
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_lo16, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.0, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: S_CBRANCH_SCC1 %bb.1, implicit undef $scc
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.1:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.2(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 1
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.2:
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, align 4, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
 bb.0:
 successors: %bb.1(0x80000000)
 S_NOP 0, implicit-def renamable $vgpr0_lo16, implicit-def renamable $vgpr0_hi16
diff --git a/llvm/test/CodeGen/AMDGPU/spillv16.ll b/llvm/test/CodeGen/AMDGPU/spillv16.ll
index 0e45df2..2d54ac8 100644
--- a/llvm/test/CodeGen/AMDGPU/spillv16.ll
+++ b/llvm/test/CodeGen/AMDGPU/spillv16.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GCN,GCN-TRUE16
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GCN,GCN-FAKE16
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=+real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GFX1250,GFX1250-TRUE16
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -mattr=-real-true16 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GFX1250,GFX1250-FAKE16
 
 define void @spill_i16_alu() {
 ; GCN-TRUE16-LABEL: spill_i16_alu:
@@ -32,6 +34,41 @@ define void @spill_i16_alu() {
 ; GCN-FAKE16-NEXT: scratch_store_b16 off, v0, s32 dlc
 ; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: spill_i16_alu:
+; GFX1250-TRUE16: ; %bb.0: ; %entry
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 ; 2-byte Folded Spill
+; GFX1250-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-TRUE16-NEXT: ;;#ASMSTART
+; GFX1250-TRUE16-NEXT: ;;#ASMEND
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v1, off, s32 offset:2 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: spill_i16_alu:
+; GFX1250-FAKE16: ; %bb.0: ; %entry
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: v_add_nc_u16 v0, 0x7b, v0
+; GFX1250-FAKE16-NEXT: scratch_store_b32 off, v0, s32 offset:4 ; 4-byte Folded Spill
+; GFX1250-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-FAKE16-NEXT: ;;#ASMSTART
+; GFX1250-FAKE16-NEXT: ;;#ASMEND
+; GFX1250-FAKE16-NEXT: scratch_load_b32 v0, off, s32 offset:4 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca i16, i32 1, align 4, addrspace(5)
@@ -88,6 +125,51 @@ define void @spill_i16_alu_two_vals() {
 ; GCN-FAKE16-NEXT: scratch_store_b16 off, v0, s32 offset:4 dlc
 ; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: spill_i16_alu_two_vals:
+; GFX1250-TRUE16: ; %bb.0: ; %entry
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:6 ; 2-byte Folded Spill
+; GFX1250-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-TRUE16-NEXT: ;;#ASMSTART
+; GFX1250-TRUE16-NEXT: ;;#ASMEND
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 offset:4 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v1, off, s32 offset:6 th:TH_LOAD_LU ; 2-byte Folded Reload
+; GFX1250-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x7b, v0.l
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
+; GFX1250-TRUE16-NEXT: scratch_store_d16_hi_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:4 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: spill_i16_alu_two_vals:
+; GFX1250-FAKE16: ; %bb.0: ; %entry
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: v_add_nc_u16 v0, 0x7b, v0
+; GFX1250-FAKE16-NEXT: scratch_store_b32 off, v0, s32 offset:8 ; 4-byte Folded Spill
+; GFX1250-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-FAKE16-NEXT: ;;#ASMSTART
+; GFX1250-FAKE16-NEXT: ;;#ASMEND
+; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 offset:4 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_b32 v1, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-FAKE16-NEXT: v_add_nc_u16 v0, 0x7b, v0
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b16 off, v1, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b16 off, v0, s32 offset:4 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca i16, i32 1, align 4, addrspace(5)
 %alloca2 = alloca i16, i32 1, align 4, addrspace(5)
@@ -140,6 +222,22 @@ define void @spill_i16() {
 ; GCN-FAKE16-NEXT: scratch_store_b16 off, v0, s32 dlc
 ; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: spill_i16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b32 off, v0, s32 offset:4 ; 4-byte Folded Spill
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: ;;#ASMSTART
+; GFX1250-NEXT: ;;#ASMEND
+; GFX1250-NEXT: scratch_load_b32 v0, off, s32 offset:4 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca i16, i32 1, align 4, addrspace(5)
@@ -183,6 +281,22 @@ define void @spill_half() {
 ; GCN-FAKE16-NEXT: scratch_store_b16 off, v0, s32 dlc
 ; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: spill_half:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b32 off, v0, s32 offset:4 ; 4-byte Folded Spill
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: ;;#ASMSTART
+; GFX1250-NEXT: ;;#ASMEND
+; GFX1250-NEXT: scratch_load_b32 v0, off, s32 offset:4 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca half, i32 1, align 4, addrspace(5)
@@ -226,6 +340,22 @@ define void @spill_i16_from_v2i16() {
 ; GCN-FAKE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 dlc
 ; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: spill_i16_from_v2i16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: scratch_load_u16 v0, off, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b32 off, v0, s32 offset:8 ; 4-byte Folded Spill
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: ;;#ASMSTART
+; GFX1250-NEXT: ;;#ASMEND
+; GFX1250-NEXT: scratch_load_b32 v0, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b16 off, v0, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca <2 x i16>, i32 2, align 1, addrspace(5)
@@ -283,6 +413,54 @@ define void @spill_2xi16_from_v2i16() {
 ; GCN-FAKE16-NEXT: scratch_store_b16 off, v0, s32 dlc
 ; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: spill_2xi16_from_v2i16:
+; GFX1250-TRUE16: ; %bb.0: ; %entry
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: s_clause 0x1
+; GFX1250-TRUE16-NEXT: scratch_store_b32 off, v0, s32 offset:12
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_store_b32 off, v0, s32 offset:8 ; 4-byte Folded Spill
+; GFX1250-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-TRUE16-NEXT: ;;#ASMSTART
+; GFX1250-TRUE16-NEXT: ;;#ASMEND
+; GFX1250-TRUE16-NEXT: scratch_load_b32 v0, off, s32 offset:12 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_b32 v0, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: spill_2xi16_from_v2i16:
+; GFX1250-FAKE16: ; %bb.0: ; %entry
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: s_clause 0x1
+; GFX1250-FAKE16-NEXT: scratch_store_b32 off, v0, s32 offset:8
+; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b32 off, v0, s32 offset:12 ; 4-byte Folded Spill
+; GFX1250-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-FAKE16-NEXT: ;;#ASMSTART
+; GFX1250-FAKE16-NEXT: ;;#ASMEND
+; GFX1250-FAKE16-NEXT: scratch_load_b32 v0, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_b32 v0, off, s32 offset:12 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca <2 x i16>, i32 2, align 1, addrspace(5)
@@ -341,6 +519,47 @@ define void @spill_2xi16_from_v2i16_one_free_reg() {
 ; GCN-FAKE16-NEXT: scratch_store_b16 off, v0, s32 dlc
 ; GCN-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: spill_2xi16_from_v2i16_one_free_reg:
+; GFX1250-TRUE16: ; %bb.0: ; %entry
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v7, off, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_store_b32 off, v0, s32 offset:8 ; 4-byte Folded Spill
+; GFX1250-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-TRUE16-NEXT: ;;#ASMSTART
+; GFX1250-TRUE16-NEXT: ;;#ASMEND
+; GFX1250-TRUE16-NEXT: v_mov_b16_e32 v0.l, v7.l
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_load_b32 v0, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-TRUE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-TRUE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: spill_2xi16_from_v2i16_one_free_reg:
+; GFX1250-FAKE16: ; %bb.0: ; %entry
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_u16 v7, off, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b32 off, v0, s32 offset:8 ; 4-byte Folded Spill
+; GFX1250-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-FAKE16-NEXT: ;;#ASMSTART
+; GFX1250-FAKE16-NEXT: ;;#ASMEND
+; GFX1250-FAKE16-NEXT: scratch_store_b16 off, v7, s32 offset:2 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_load_b32 v0, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-FAKE16-NEXT: scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-FAKE16-NEXT: s_wait_storecnt 0x0
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca <2 x i16>, i32 2, align 1, addrspace(5)
@@ -375,6 +594,22 @@ define void @spill_v2i16() {
 ; GCN-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc
 ; GCN-NEXT: s_waitcnt_vscnt null, 0x0
 ; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: spill_v2i16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: scratch_load_b32 v0, off, s32 offset:4 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b32 off, v0, s32 offset:8 ; 4-byte Folded Spill
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: ;;#ASMSTART
+; GFX1250-NEXT: ;;#ASMEND
+; GFX1250-NEXT: scratch_load_b32 v0, off, s32 offset:8 th:TH_LOAD_LU ; 4-byte Folded Reload
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: scratch_store_b32 off, v0, s32 offset:4 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
 entry:
 %alloca = alloca <2 x i16>, i32 2, align 1, addrspace(5)
diff --git a/llvm/test/CodeGen/AMDGPU/spillv16.mir b/llvm/test/CodeGen/AMDGPU/spillv16.mir
index 05569bf..ba2d926 100644
--- a/llvm/test/CodeGen/AMDGPU/spillv16.mir
+++ b/llvm/test/CodeGen/AMDGPU/spillv16.mir
@@ -1,6 +1,7 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -march=amdgcn -verify-machineinstrs -mcpu=gfx1100 -mattr=+real-true16 -run-pass=regallocfast -o - %s | FileCheck -check-prefix=SPILLED %s
 # RUN: llc -march=amdgcn -verify-machineinstrs -mcpu=gfx1100 -mattr=+real-true16 -run-pass=regallocfast,prologepilog -o - %s | FileCheck -check-prefix=EXPANDED %s
+# RUN: llc -march=amdgcn -verify-machineinstrs -mcpu=gfx1250 -mattr=+real-true16 -run-pass=regallocfast,prologepilog -o - %s | FileCheck -check-prefix=SRAMECC-EXPANDED %s
 ---
 name: spill_restore_vgpr16
@@ -46,6 +47,27 @@ body: |
 ; EXPANDED-NEXT: $vgpr0_lo16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, addrspace 5)
 ; EXPANDED-NEXT: $vgpr0_hi16 = SCRATCH_LOAD_SHORT_D16_SADDR_t16 $sgpr32, 2, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, addrspace 5)
 ; EXPANDED-NEXT: S_NOP 0, implicit killed renamable $vgpr0_lo16, implicit killed renamable $vgpr0_hi16
+ ;
+ ; SRAMECC-EXPANDED-LABEL: name: spill_restore_vgpr16
+ ; SRAMECC-EXPANDED: bb.0:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.1(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit-def renamable $vgpr0_lo16, implicit-def renamable $vgpr0_hi16
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_hi16, $sgpr32, 2, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.1, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: SCRATCH_STORE_SHORT_SADDR_t16 killed $vgpr0_lo16, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %stack.0, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: S_CBRANCH_SCC1 %bb.1, implicit undef $scc
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.1:
+ ; SRAMECC-EXPANDED-NEXT: successors: %bb.2(0x80000000)
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 1
+ ; SRAMECC-EXPANDED-NEXT: {{ $}}
+ ; SRAMECC-EXPANDED-NEXT: bb.2:
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.0, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
+ ; SRAMECC-EXPANDED-NEXT: $vgpr1 = SCRATCH_LOAD_USHORT_SADDR $sgpr32, 2, 0, implicit $exec, implicit $flat_scr :: (load (s16) from %stack.1, addrspace 5)
+ ; SRAMECC-EXPANDED-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e64 0, killed $vgpr1_lo16, 0, implicit $exec
+ ; SRAMECC-EXPANDED-NEXT: S_NOP 0, implicit killed renamable $vgpr0_lo16, implicit killed renamable $vgpr0_hi16
 bb.0:
 S_NOP 0, implicit-def %0:vgpr_16, implicit-def %1:vgpr_16
 S_CBRANCH_SCC1 implicit undef $scc, %bb.1
diff --git a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
index 5aafb0f..364598f 100644
--- a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
+++ b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
@@ -31,8 +31,8 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
 ; CHECK-NEXT: [[COPY13:%[0-9]+]]:sgpr_32 = COPY $sgpr10
 ; CHECK-NEXT: [[COPY14:%[0-9]+]]:sgpr_32 = COPY $sgpr8
[[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM [[COPY]], 232, 0 :: (invariant load (s64) from %ir.39, addrspace 4) - ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %125:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: KILL undef %125:sgpr_128 + ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %117:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32)) + ; CHECK-NEXT: KILL undef %117:sgpr_128 ; CHECK-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 4, implicit-def dead $scc ; CHECK-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 4, implicit-def dead $scc ; CHECK-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], 4, implicit-def dead $scc @@ -44,87 +44,85 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[S_SUB_I32_1:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM]], 30, implicit-def dead $scc ; CHECK-NEXT: undef [[S_ADD_U32_:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 16, 0 :: (invariant load (s128) from %ir.81, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 16, 0 :: (invariant load (s128) from %ir.71, addrspace 4) ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM1:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM undef %74:sreg_64, 0, 0 :: (invariant load (s128) from `ptr addrspace(4) poison`, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 64, 0 :: (invariant load (s128) from %ir.88, addrspace 4) ; CHECK-NEXT: KILL undef %74:sreg_64 ; CHECK-NEXT: KILL [[S_ADD_U32_]].sub0, [[S_ADD_U32_]].sub1 ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_LOAD_DWORDX4_IMM]], 0, 0 :: (dereferenceable invariant load (s32)) ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = S_MOV_B32 0 - ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %118:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], undef %89:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %112:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], undef %87:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: KILL undef %89:sgpr_128 - ; CHECK-NEXT: KILL undef %118:sgpr_128 + ; CHECK-NEXT: KILL undef %112:sgpr_128 + ; CHECK-NEXT: KILL undef %87:sgpr_128 ; CHECK-NEXT: 
[[S_SUB_I32_2:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc ; CHECK-NEXT: undef [[S_ADD_U32_1:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_1:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc ; CHECK-NEXT: undef [[S_ADD_U32_2:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_1]], implicit-def $scc ; CHECK-NEXT: [[S_ADD_U32_2:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_1]], 64, 0 :: (invariant load (s128) from %ir.87, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 64, 0 :: (invariant load (s128) from %ir.93, addrspace 4) - ; CHECK-NEXT: KILL [[S_ADD_U32_1]].sub0, [[S_ADD_U32_1]].sub1 + ; CHECK-NEXT: [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %148:sreg_32, 31, implicit-def dead $scc + ; CHECK-NEXT: undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], undef %148:sreg_32, implicit-def $scc + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_1]], 64, 0 :: (invariant load (s128) from %ir.77, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 64, 0 :: (invariant load (s128) from %ir.83, addrspace 4) ; CHECK-NEXT: KILL [[S_ADD_U32_2]].sub0, [[S_ADD_U32_2]].sub1 - ; CHECK-NEXT: [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %169:sreg_32, 31, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], undef %169:sreg_32, implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %169:sreg_32, implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_2]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_9:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_9:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef 
[[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: KILL [[S_ADD_U32_1]].sub0, [[S_ADD_U32_1]].sub1 + ; CHECK-NEXT: [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %148:sreg_32, implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_2]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_9:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_9:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_]], 16, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_1:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_2]], 16, implicit-def dead $scc ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %302:sreg_32, 0, 0 :: (dereferenceable invariant load (s32)) + ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %279:sreg_32, 0, 0 :: (dereferenceable invariant load (s32)) ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32)) ; CHECK-NEXT: 
[[S_BUFFER_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_MOV_B32_]], 16, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %357:sgpr_128, undef %358:sreg_32, 0, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %368:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.99, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 64, 0 :: (invariant load (s128) from %ir.107, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.117, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 0, 0 :: (invariant load (s128) from %ir.124, addrspace 4) - ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %352:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %363:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %334:sgpr_128, undef %335:sreg_32, 0, 0 :: (dereferenceable invariant load (s32)) + ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %345:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32)) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.95, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 0, 0 :: (invariant load (s128) from %ir.100, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.105, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4) + ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %329:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32)) + ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %340:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32)) + ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN 
[[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM4]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_ADD_I32_2:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM]], -98, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_3:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM1]], -114, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_4:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM2]], -130, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_5:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM2]], -178, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_13:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_13:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc ; CHECK-NEXT: [[S_LSHL_B32_3:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY12]], 4, implicit-def dead $scc - ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN4:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM4]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN4:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_ADD_I32_6:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_3]], 16, implicit-def dead $scc - 
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %384:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32)) + ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %361:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32)) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN5:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM5]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.129, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.145, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 224, 0 :: (invariant load (s128) from %ir.117, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.133, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 576, 0 :: (invariant load (s128) from %ir.138, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN6:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM6]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 224, 0 :: (invariant load (s128) from %ir.134, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.162, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 224, 0 :: (invariant load (s128) from %ir.140, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.122, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 224, 0 :: (invariant load (s128) from %ir.128, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN7:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM7]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN8:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM8]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_ADD_I32_7:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM4]], -217, implicit-def dead $scc @@ -135,49 +133,49 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[S_ADD_I32_12:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -329, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_13:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -345, 
implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_14:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM6]], -441, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_2]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_2]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc ; CHECK-NEXT: [[S_LSHL_B32_4:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY13]], 4, implicit-def dead $scc ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN9:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM9]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_ASHR_I32_4:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_4]], 31, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_4]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_4]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc ; CHECK-NEXT: [[S_LSHL_B32_5:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 3, implicit-def dead $scc ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN10:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM12]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_ASHR_I32_5:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_5]], 31, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s32) from %ir.273, align 8, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 576, 0 :: (invariant load (s128) from %ir.157, addrspace 4) + ; CHECK-NEXT: undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_17]], 168, 0 :: (invariant load (s32) from %ir.260, align 8, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.145, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN11:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM14]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN12:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], 
[[S_LOAD_DWORDX4_IMM10]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN13:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM11]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub3:sgpr_128 = S_MOV_B32 553734060 ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 -1 ; CHECK-NEXT: [[COPY15:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]] - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_9]], 0, 0 :: (invariant load (s128) from %ir.170, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 0, 0 :: (invariant load (s128) from %ir.158, addrspace 4) ; CHECK-NEXT: [[COPY15:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub1 ; CHECK-NEXT: [[COPY15:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORD_IMM]] ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY15]], 0, 0 :: (dereferenceable invariant load (s32)) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN14:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM15]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN15:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM13]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.178, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.183, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_9]], 0, 0 :: (invariant load (s128) from %ir.166, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.171, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN16:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM16]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_LSHL_B32_6:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 3, implicit-def dead $scc ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_ASHR_I32_6:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_6]], 31, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_15:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM4]], -467, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.282, addrspace 4) + ; CHECK-NEXT: undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], implicit-def $scc + ; CHECK-NEXT: 
[[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s64) from %ir.269, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM17]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM18]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.205, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_13]], 0, 0 :: (invariant load (s128) from %ir.211, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.193, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.199, addrspace 4) ; CHECK-NEXT: [[COPY16:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]] - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.216, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 0, 0 :: (invariant load (s128) from %ir.221, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_13]], 0, 0 :: (invariant load (s128) from %ir.204, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.209, addrspace 4) ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM1]].sub1, 65535, implicit-def dead $scc ; CHECK-NEXT: [[COPY16:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0 ; CHECK-NEXT: [[COPY16:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_]] @@ -189,30 +187,30 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN20:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM22]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[S_ASHR_I32_7:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_7]], 31, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_16:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM5]], -468, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s64) from %ir.293, addrspace 4) + ; CHECK-NEXT: undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec 
= S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.280, addrspace 4) ; CHECK-NEXT: [[COPY17:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]] ; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM2]].sub1, 65535, implicit-def dead $scc ; CHECK-NEXT: [[COPY17:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0 ; CHECK-NEXT: [[COPY17:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_1]] ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY17]], 0, 0 :: (dereferenceable invariant load (s32)) - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.256, addrspace 4) - ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %470:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4) - ; CHECK-NEXT: KILL [[S_ADD_U32_16]].sub0, [[S_ADD_U32_16]].sub1 - ; CHECK-NEXT: KILL undef %470:sreg_64 + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 160, 0 :: (invariant load (s128) from %ir.244, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %443:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4) + ; CHECK-NEXT: KILL [[S_ADD_U32_15]].sub0, [[S_ADD_U32_15]].sub1 ; CHECK-NEXT: KILL [[COPY17]].sub0_sub1_sub2, [[COPY17]].sub3 + ; CHECK-NEXT: KILL undef %443:sreg_64 ; CHECK-NEXT: [[S_LSHL_B32_8:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY14]], 3, implicit-def dead $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_17]], 160, 0 :: (invariant load (s128) from %ir.265, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.252, addrspace 4) ; CHECK-NEXT: [[S_ASHR_I32_8:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_8]], 31, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_17:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM6]], -469, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_8]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_21]], 168, 0 :: (invariant load (s32) from %ir.305, align 8, addrspace 4) + ; CHECK-NEXT: undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_8]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s32) from %ir.291, align 8, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN21:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM23]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN22:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM24]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM24]] ; CHECK-NEXT: KILL 
[[S_LOAD_DWORDX4_IMM23]] + ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM24]] ; CHECK-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORD_IMM1]], 65535, implicit-def dead $scc ; CHECK-NEXT: [[COPY18:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]] ; CHECK-NEXT: [[COPY18:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_2]] @@ -224,22 +222,22 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[S_ADD_I32_21:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -507, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_22:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -539, implicit-def dead $scc ; CHECK-NEXT: [[S_ADD_I32_23:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM7]], -473, implicit-def dead $scc - ; CHECK-NEXT: undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.323, addrspace 4) - ; CHECK-NEXT: undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.329, addrspace 4) - ; CHECK-NEXT: undef [[S_ADD_U32_24:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc - ; CHECK-NEXT: [[S_ADD_U32_24:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc - ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_24]], 96, 0 :: (invariant load (s128) from %ir.335, addrspace 4) + ; CHECK-NEXT: undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_21]], 96, 0 :: (invariant load (s128) from %ir.309, addrspace 4) + ; CHECK-NEXT: undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.315, addrspace 4) + ; CHECK-NEXT: undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc + ; CHECK-NEXT: [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc + ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.321, addrspace 4) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN23:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM25]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: 
[[BUFFER_LOAD_FORMAT_X_IDXEN24:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM26]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN25:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM27]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM27]] ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM25]] - ; CHECK-NEXT: KILL [[V_MOV_B32_e32_]] ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM26]] + ; CHECK-NEXT: KILL [[V_MOV_B32_e32_]] + ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM27]] ; CHECK-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -2, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec ; CHECK-NEXT: [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -1, [[BUFFER_LOAD_FORMAT_X_IDXEN1]], 0, implicit $exec ; CHECK-NEXT: [[V_ADD_U32_e64_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -3, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec @@ -351,13 +349,13 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x ; CHECK-NEXT: [[V_OR_B32_e64_64:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_63]], [[V_ADD_U32_e64_28]], implicit $exec ; CHECK-NEXT: [[V_ADD_U32_e64_30:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -593, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec ; CHECK-NEXT: [[V_OR_B32_e64_65:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_64]], [[V_ADD_U32_e64_29]], implicit $exec - ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %543:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) poison`, addrspace 4) + ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %516:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) poison`, addrspace 4) ; CHECK-NEXT: [[V_OR_B32_e64_66:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_65]], [[V_ADD_U32_e64_30]], implicit $exec ; CHECK-NEXT: [[S_ADD_I32_24:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM8]], -594, implicit-def dead $scc ; CHECK-NEXT: [[V_OR_B32_e64_67:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[S_ADD_I32_24]], [[V_OR_B32_e64_66]], implicit $exec ; CHECK-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 0, [[V_OR_B32_e64_67]], implicit $exec ; CHECK-NEXT: undef [[V_CNDMASK_B32_e64_:%[0-9]+]].sub3:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[V_CMP_EQ_U32_e64_]], implicit $exec - ; CHECK-NEXT: IMAGE_STORE_V4_V2_nsa_gfx10 [[V_CNDMASK_B32_e64_]], undef %557:vgpr_32, undef %559:vgpr_32, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8) + ; CHECK-NEXT: IMAGE_STORE_V4_V2_nsa_gfx10 [[V_CNDMASK_B32_e64_]], undef %530:vgpr_32, undef %532:vgpr_32, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8) ; CHECK-NEXT: S_ENDPGM 0 .expVert: %0 = extractelement <31 x i32> %userData, i64 2 diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll index db49339..9c16b3c 100644 --- a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll +++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll @@ -22,8 +22,6 @@ ; GFX9-DAG: s_mov_b32 s[[DESC3:[0-9]+]], 0xe00000 ; OFFREG is offset system SGPR -; GCN: buffer_store_dword {{v[0-9]+}}, off, s[[[DESC0]]:[[DESC3]]], 0 offset:{{[0-9]+}} ; 4-byte Folded Spill -; GCN: buffer_load_dword v{{[0-9]+}}, 
off, s[[[DESC0]]:[[DESC3]]], 0 offset:{{[0-9]+}} ; 4-byte Folded Reload ; GCN: NumVgprs: 256 ; GCN: ScratchSize: 640 diff --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll index ad8dcd3..21f0c00 100644 --- a/llvm/test/CodeGen/AMDGPU/wqm.ll +++ b/llvm/test/CodeGen/AMDGPU/wqm.ll @@ -3477,13 +3477,10 @@ define amdgpu_gs void @wqm_init_exec_wwm() { ; GFX9-W64-NEXT: s_mov_b64 exec, 0 ; GFX9-W64-NEXT: s_mov_b32 s1, 0 ; GFX9-W64-NEXT: s_mov_b32 s0, s1 -; GFX9-W64-NEXT: s_cmp_lg_u64 exec, 0 -; GFX9-W64-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GFX9-W64-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX9-W64-NEXT: s_cmp_eq_u64 s[0:1], 0 ; GFX9-W64-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX9-W64-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1] -; GFX9-W64-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] -; GFX9-W64-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-W64-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-W64-NEXT: v_cndmask_b32_e64 v1, 0, 1.0, s[0:1] ; GFX9-W64-NEXT: exp mrt0 off, off, off, off ; GFX9-W64-NEXT: s_endpgm ; @@ -3491,14 +3488,11 @@ define amdgpu_gs void @wqm_init_exec_wwm() { ; GFX10-W32: ; %bb.0: ; GFX10-W32-NEXT: s_mov_b32 exec_lo, 0 ; GFX10-W32-NEXT: s_mov_b32 s1, 0 -; GFX10-W32-NEXT: s_cmp_lg_u64 exec, 0 +; GFX10-W32-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-W32-NEXT: s_mov_b32 s0, s1 -; GFX10-W32-NEXT: s_cselect_b32 s2, -1, 0 -; GFX10-W32-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX10-W32-NEXT: v_mov_b32_e32 v1, 0 +; GFX10-W32-NEXT: s_cmp_eq_u64 s[0:1], 0 ; GFX10-W32-NEXT: s_cselect_b32 s0, -1, 0 -; GFX10-W32-NEXT: s_xor_b32 s0, s2, s0 -; GFX10-W32-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s0 +; GFX10-W32-NEXT: v_cndmask_b32_e64 v1, 0, 1.0, s0 ; GFX10-W32-NEXT: exp mrt0 off, off, off, off ; GFX10-W32-NEXT: s_endpgm call void @llvm.amdgcn.init.exec(i64 0) diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics.ll b/llvm/test/CodeGen/ARM/fp-intrinsics.ll index 93b6a58..cb87508 100644 --- a/llvm/test/CodeGen/ARM/fp-intrinsics.ll +++ b/llvm/test/CodeGen/ARM/fp-intrinsics.ll @@ -76,7 +76,6 @@ define i32 @fptosi_f32(float %x) #0 { ; CHECK-NOSP: bl __aeabi_f2iz ; CHECK-NOSP: bl __aeabi_f2iz ; CHECK-SP: vcvt.s32.f32 -; FIXME-CHECK-SP: vcvt.s32.f32 define void @fptosi_f32_twice(float %arg, ptr %ptr) #0 { entry: %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %arg, metadata !"fpexcept.strict") #0 @@ -146,6 +145,80 @@ define float @tan_f32(float %x) #0 { ret float %val } +; CHECK-LABEL: acos_f32: +; CHECK: bl acosf +define float @acos_f32(float %x, float %y) #0 { + %val = call float @llvm.experimental.constrained.acos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: asin_f32: +; CHECK: bl asinf +define float @asin_f32(float %x, float %y) #0 { + %val = call float @llvm.experimental.constrained.asin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: atan_f32: +; CHECK: bl atanf +define float @atan_f32(float %x, float %y) #0 { + %val = call float @llvm.experimental.constrained.atan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: cosh_f32: +; CHECK: bl coshf +define float @cosh_f32(float %x, float %y) #0 { + %val = call float @llvm.experimental.constrained.cosh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: sinh_f32: +; CHECK: bl sinhf +define float @sinh_f32(float %x, float %y) #0 { + %val = call float @llvm.experimental.constrained.sinh.f32(float %x, metadata !"round.tonearest", 
metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: tanh_f32: +; CHECK: bl tanhf +define float @tanh_f32(float %x, float %y) #0 { + %val = call float @llvm.experimental.constrained.tanh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: fmuladd_f32: +; CHECK-SP: vfma.f32 +; CHECK-NOSP: bl __aeabi_fmul +; CHECK-NOSP: bl __aeabi_fadd +define float @fmuladd_f32(float %x, float %y, float %z) #0 { + %val = call float @llvm.experimental.constrained.fmuladd.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: ldexp_f32: +; CHECK: bl ldexpf +define float @ldexp_f32(float %x, i32 %y) #0 { + %val = call float @llvm.experimental.constrained.ldexp.f32.i32(float %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: roundeven_f32: +; CHECK-SP-V8: vrintn.f32 +; CHECK-NOSP: bl roundevenf +define float @roundeven_f32(float %x) #0 { + %val = call float @llvm.experimental.constrained.roundeven.f32(float %x, metadata !"fpexcept.strict") #0 + ret float %val +} + +; CHECK-LABEL: uitofp_f32_i32: +; CHECK-NOSP: bl __aeabi_ui2f +; FIXME-CHECK-SP: vcvt.f32.f64 +define float @uitofp_f32_i32(i32 %x) #0 { + %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %val +} + ; CHECK-LABEL: atan2_f32: ; CHECK: bl atan2f define float @atan2_f32(float %x, float %y) #0 { @@ -617,6 +690,80 @@ define double @tan_f64(double %x) #0 { ret double %val } +; CHECK-LABEL: acos_f64: +; CHECK: bl acos +define double @acos_f64(double %x, double %y) #0 { + %val = call double @llvm.experimental.constrained.acos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: asin_f64: +; CHECK: bl asin +define double @asin_f64(double %x, double %y) #0 { + %val = call double @llvm.experimental.constrained.asin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: atan_f64: +; CHECK: bl atan +define double @atan_f64(double %x, double %y) #0 { + %val = call double @llvm.experimental.constrained.atan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: cosh_f64: +; CHECK: bl cosh +define double @cosh_f64(double %x, double %y) #0 { + %val = call double @llvm.experimental.constrained.cosh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: sinh_f64: +; CHECK: bl sinh +define double @sinh_f64(double %x, double %y) #0 { + %val = call double @llvm.experimental.constrained.sinh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: tanh_f64: +; CHECK: bl tanh +define double @tanh_f64(double %x, double %y) #0 { + %val = call double @llvm.experimental.constrained.tanh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: fmuladd_f64: +; CHECK-DP: vfma.f64 +; CHECK-NODP: bl __aeabi_dmul +; CHECK-NODP: bl __aeabi_dadd +define double @fmuladd_f64(double %x, double %y, double %z) #0 { + %val = call double @llvm.experimental.constrained.fmuladd.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: ldexp_f64: +; CHECK: bl 
ldexp +define double @ldexp_f64(double %x, i32 %y) #0 { + %val = call double @llvm.experimental.constrained.ldexp.f64.i32(double %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: roundeven_f64: +; CHECK-DP-V8: vrintn.f64 +; CHECK-NODP: bl roundeven +define double @roundeven_f64(double %x) #0 { + %val = call double @llvm.experimental.constrained.roundeven.f64(double %x, metadata !"fpexcept.strict") #0 + ret double %val +} + +; CHECK-LABEL: uitofp_f64_i32: +; CHECK-NOSP: bl __aeabi_ui2d +; FIXME-CHECK-SP: vsub.f64 +define double @uitofp_f64_i32(i32 %x) #0 { + %val = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret double %val +} + ; CHECK-LABEL: atan2_f64: ; CHECK: bl atan2 define double @atan2_f64(double %x, double %y) #0 { @@ -1052,6 +1199,16 @@ declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, meta declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata) declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.acos.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.asin.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.atan.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.cosh.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.sinh.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.tanh.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, metadata, metadata) +declare float @llvm.experimental.constrained.ldexp.f32.i32(float, i32, metadata, metadata) +declare float @llvm.experimental.constrained.roundeven.f32(float, metadata) +declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata) declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata) declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata) @@ -1087,6 +1244,16 @@ declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, me declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata) declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata) +declare double @llvm.experimental.constrained.acos.f64(double, metadata, metadata) +declare double @llvm.experimental.constrained.asin.f64(double, metadata, metadata) +declare double @llvm.experimental.constrained.atan.f64(double, metadata, metadata) +declare double @llvm.experimental.constrained.cosh.f64(double, metadata, metadata) +declare double @llvm.experimental.constrained.sinh.f64(double, metadata, metadata) +declare double @llvm.experimental.constrained.tanh.f64(double, metadata, metadata) +declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata) +declare double @llvm.experimental.constrained.ldexp.f64.i32(double, i32, metadata, metadata) +declare double @llvm.experimental.constrained.roundeven.f64(double, metadata) +declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata) declare 
double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata) declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata) diff --git a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll index 200b14b..b4060d5 100644 --- a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll +++ b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll @@ -98,12 +98,18 @@ define i32 @test_fptosi_i32(ptr %p) { ret i32 %r } -; FIXME -;define i64 @test_fptosi_i64(ptr %p) { -; %a = load half, ptr %p, align 2 -; %r = fptosi half %a to i64 -; ret i64 %r -;} +define i64 @test_fptosi_i64(ptr %p) { +; CHECK-LABEL: test_fptosi_i64: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: ldrh r0, [r0] +; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: pop {r11, pc} + %a = load half, ptr %p, align 2 + %r = fptosi half %a to i64 + ret i64 %r +} define i32 @test_fptoui_i32(ptr %p) { ; CHECK-LABEL: test_fptoui_i32: @@ -116,12 +122,18 @@ define i32 @test_fptoui_i32(ptr %p) { ret i32 %r } -; FIXME -;define i64 @test_fptoui_i64(ptr %p) { -; %a = load half, ptr %p, align 2 -; %r = fptoui half %a to i64 -; ret i64 %r -;} +define i64 @test_fptoui_i64(ptr %p) { +; CHECK-LABEL: test_fptoui_i64: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: ldrh r0, [r0] +; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: pop {r11, pc} + %a = load half, ptr %p, align 2 + %r = fptoui half %a to i64 + ret i64 %r +} define void @test_sitofp_i32(i32 %a, ptr %p) { ; CHECK-LABEL: test_sitofp_i32: @@ -145,19 +157,31 @@ define void @test_uitofp_i32(i32 %a, ptr %p) { ret void } -; FIXME -;define void @test_sitofp_i64(i64 %a, ptr %p) { -; %r = sitofp i64 %a to half -; store half %r, ptr %p -; ret void -;} +define void @test_sitofp_i64(i64 %a, ptr %p) { +; CHECK-LABEL: test_sitofp_i64: +; CHECK: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: mov r4, r2 +; CHECK-NEXT: bl __floatdihf +; CHECK-NEXT: vstr.16 s0, [r4] +; CHECK-NEXT: pop {r4, pc} + %r = sitofp i64 %a to half + store half %r, ptr %p + ret void +} -; FIXME -;define void @test_uitofp_i64(i64 %a, ptr %p) { -; %r = uitofp i64 %a to half -; store half %r, ptr %p -; ret void -;} +define void @test_uitofp_i64(i64 %a, ptr %p) { +; CHECK-LABEL: test_uitofp_i64: +; CHECK: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: mov r4, r2 +; CHECK-NEXT: bl __floatundihf +; CHECK-NEXT: vstr.16 s0, [r4] +; CHECK-NEXT: pop {r4, pc} + %r = uitofp i64 %a to half + store half %r, ptr %p + ret void +} define void @test_fptrunc_float(float %f, ptr %p) { ; CHECK-LABEL: test_fptrunc_float: @@ -613,6 +637,902 @@ define void @test_fmuladd(ptr %p, ptr %q, ptr %r) { ret void } +; Half-precision intrinsics + +define half @add_f16(half %x, half %y) #0 { +; CHECK-LABEL: add_f16: +; CHECK: vadd.f16 s0, s0, s1 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @sub_f16(half %x, half %y) #0 { +; CHECK-LABEL: sub_f16: +; CHECK: vsub.f16 s0, s0, s1 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.fsub.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @mul_f16(half %x, half %y) #0 { +; CHECK-LABEL: mul_f16: +; CHECK: vmul.f16 s0, s0, s1 +; CHECK-NEXT: bx lr + %val = call half 
@llvm.experimental.constrained.fmul.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @div_f16(half %x, half %y) #0 { +; CHECK-LABEL: div_f16: +; CHECK: vdiv.f16 s0, s0, s1 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.fdiv.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @frem_f16(half %x, half %y) #0 { +; CHECK-LABEL: frem_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: vcvtb.f32.f16 s1, s1 +; CHECK-NEXT: bl fmodf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.frem.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @fma_f16(half %x, half %y, half %z) #0 { +; CHECK-LABEL: fma_f16: +; CHECK: vfma.f16 s2, s0, s1 +; CHECK-NEXT: vmov.f32 s0, s2 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.fma.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @fmuladd_f16(half %x, half %y, half %z) #0 { +; CHECK-LABEL: fmuladd_f16: +; CHECK: vfma.f16 s2, s0, s1 +; CHECK-NEXT: vmov.f32 s0, s2 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.fmuladd.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define i32 @fptosi_i32_f16(half %x) #0 { +; CHECK-LABEL: fptosi_i32_f16: +; CHECK: vcvt.s32.f16 s0, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: bx lr + %val = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %x, metadata !"fpexcept.strict") #0 + ret i32 %val +} + +define i32 @fptoui_i32_f16(half %x) #0 { +; CHECK-LABEL: fptoui_i32_f16: +; CHECK: vcvt.s32.f16 s0, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: bx lr + %val = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") #0 + ret i32 %val +} + +define i64 @fptosi_i64_f16(half %x) #0 { +; CHECK-LABEL: fptosi_i64_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vmov.f16 r0, s0 +; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: pop {r11, pc} + %val = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %x, metadata !"fpexcept.strict") #0 + ret i64 %val +} + +define i64 @fptoui_i64_f16(half %x) #0 { +; CHECK-LABEL: fptoui_i64_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vmov.f16 r0, s0 +; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: pop {r11, pc} + %val = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %x, metadata !"fpexcept.strict") #0 + ret i64 %val +} + +define half @sitofp_f16_i32(i32 %x) #0 { +; CHECK-LABEL: sitofp_f16_i32: +; CHECK: .pad #8 +; CHECK-NEXT: sub sp, sp, #8 +; CHECK-NEXT: movw r1, #0 +; CHECK-NEXT: eor r0, r0, #-2147483648 +; CHECK-NEXT: movt r1, #17200 +; CHECK-NEXT: str r0, [sp] +; CHECK-NEXT: str r1, [sp, #4] +; CHECK-NEXT: vldr d16, .LCPI57_0 +; CHECK-NEXT: vldr d17, [sp] +; CHECK-NEXT: vsub.f64 d16, d17, d16 +; CHECK-NEXT: vcvtb.f16.f64 s0, d16 +; CHECK-NEXT: add sp, sp, #8 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: .LCPI57_0: +; CHECK-NEXT: .long 2147483648 +; CHECK-NEXT: .long 1127219200 + %val = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 
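+ ; Reader's note on the CHECK lines above (a gloss, not an autogenerated
+ ; check): without a direct strict i32->f16 instruction, the backend uses
+ ; the classic double-precision bias trick. The eor flips the sign bit of
+ ; %x, the two str instructions build a double whose high word is
+ ; 0x43300000 (decimal 1127219200, the exponent field of 2^52) and whose
+ ; low word is the adjusted integer, and vsub.f64 subtracts .LCPI57_0,
+ ; the double 2^52 + 2^31 (its two .long halves are 2147483648 and
+ ; 1127219200). The difference is exactly the signed i32 value, which
+ ; vcvtb.f16.f64 then rounds once to half.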
+ ret half %val +} + +define half @uitofp_f16_i32(i32 %x) #0 { +; CHECK-LABEL: uitofp_f16_i32: +; CHECK: .pad #8 +; CHECK-NEXT: sub sp, sp, #8 +; CHECK-NEXT: movw r1, #0 +; CHECK-NEXT: str r0, [sp] +; CHECK-NEXT: movt r1, #17200 +; CHECK-NEXT: vldr d16, .LCPI58_0 +; CHECK-NEXT: str r1, [sp, #4] +; CHECK-NEXT: vldr d17, [sp] +; CHECK-NEXT: vsub.f64 d16, d17, d16 +; CHECK-NEXT: vcvtb.f16.f64 s0, d16 +; CHECK-NEXT: add sp, sp, #8 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: .LCPI58_0: +; CHECK-NEXT: .long 0 +; CHECK-NEXT: .long 1127219200 + %val = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @sitofp_f16_i64(i64 %x) #0 { +; CHECK-LABEL: sitofp_f16_i64: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl __floatdihf +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @uitofp_f16_i64(i64 %x) #0 { +; CHECK-LABEL: uitofp_f16_i64: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl __floatundihf +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @sitofp_f16_i128(i128 %x) #0 { +; CHECK-LABEL: sitofp_f16_i128: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl __floattihf +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.sitofp.f16.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @uitofp_f16_i128(i128 %x) #0 { +; CHECK-LABEL: uitofp_f16_i128: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl __floatuntihf +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.uitofp.f16.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @sqrt_f16(half %x) #0 { +; CHECK-LABEL: sqrt_f16: +; CHECK: vsqrt.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.sqrt.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @powi_f16(half %x, i32 %y) #0 { +; CHECK-LABEL: powi_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl __powisf2 +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.powi.f16(half %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @sin_f16(half %x) #0 { +; CHECK-LABEL: sin_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl sinf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.sin.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @cos_f16(half %x) #0 { +; CHECK-LABEL: cos_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl cosf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.cos.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val 
+} + +define half @tan_f16(half %x) #0 { +; CHECK-LABEL: tan_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl tanf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.tan.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @asin_f16(half %x) #0 { +; CHECK-LABEL: asin_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl asinf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.asin.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @acos_f16(half %x) #0 { +; CHECK-LABEL: acos_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl acosf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.acos.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @atan_f16(half %x) #0 { +; CHECK-LABEL: atan_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl atanf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.atan.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @atan2_f16(half %x, half %y) #0 { +; CHECK-LABEL: atan2_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: vcvtb.f32.f16 s1, s1 +; CHECK-NEXT: bl atan2f +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.atan2.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @sinh_f16(half %x) #0 { +; CHECK-LABEL: sinh_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl sinhf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.sinh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @cosh_f16(half %x) #0 { +; CHECK-LABEL: cosh_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl coshf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.cosh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @tanh_f16(half %x) #0 { +; CHECK-LABEL: tanh_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl tanhf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.tanh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @pow_f16(half %x, half %y) #0 { +; CHECK-LABEL: pow_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: vcvtb.f32.f16 s1, s1 +; CHECK-NEXT: bl powf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half 
@llvm.experimental.constrained.pow.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @log_f16(half %x) #0 { +; CHECK-LABEL: log_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl logf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.log.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @log10_f16(half %x) #0 { +; CHECK-LABEL: log10_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl log10f +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.log10.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @log2_f16(half %x) #0 { +; CHECK-LABEL: log2_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl log2f +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.log2.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @exp_f16(half %x) #0 { +; CHECK-LABEL: exp_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl expf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.exp.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @exp2_f16(half %x) #0 { +; CHECK-LABEL: exp2_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl exp2f +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.exp2.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @rint_f16(half %x) #0 { +; CHECK-LABEL: rint_f16: +; CHECK: vrintx.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.rint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @nearbyint_f16(half %x) #0 { +; CHECK-LABEL: nearbyint_f16: +; CHECK: vrintr.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.nearbyint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define i32 @lrint_f16(half %x) #0 { +; CHECK-LABEL: lrint_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl lrintf +; CHECK-NEXT: pop {r11, pc} + %val = call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret i32 %val +} + +define i64 @llrint_f16(half %x) #0 { +; CHECK-LABEL: llrint_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl llrintf +; CHECK-NEXT: pop {r11, pc} + %val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret i64 %val +} + +define half @maxnum_f16(half %x, half %y) #0 { +; CHECK-LABEL: maxnum_f16: +; CHECK: vmaxnm.f16 s0, s0, s1 +; CHECK-NEXT: bx lr + %val = call half 
@llvm.experimental.constrained.maxnum.f16(half %x, half %y, metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @minnum_f16(half %x, half %y) #0 { +; CHECK-LABEL: minnum_f16: +; CHECK: vminnm.f16 s0, s0, s1 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.minnum.f16(half %x, half %y, metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @ceil_f16(half %x) #0 { +; CHECK-LABEL: ceil_f16: +; CHECK: vrintp.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.ceil.f16(half %x, metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @floor_f16(half %x) #0 { +; CHECK-LABEL: floor_f16: +; CHECK: vrintm.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.floor.f16(half %x, metadata !"fpexcept.strict") #0 + ret half %val +} + +define i32 @lround_f16(half %x) #0 { +; CHECK-LABEL: lround_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl lroundf +; CHECK-NEXT: pop {r11, pc} + %val = call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata !"fpexcept.strict") #0 + ret i32 %val +} + +define i64 @llround_f16(half %x) #0 { +; CHECK-LABEL: llround_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl llroundf +; CHECK-NEXT: pop {r11, pc} + %val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict") #0 + ret i64 %val +} + +define half @round_f16(half %x) #0 { +; CHECK-LABEL: round_f16: +; CHECK: vrinta.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.round.f16(half %x, metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @roundeven_f16(half %x) #0 { +; CHECK-LABEL: roundeven_f16: +; CHECK: vrintn.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.roundeven.f16(half %x, metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @trunc_f16(half %x) #0 { +; CHECK-LABEL: trunc_f16: +; CHECK: vrintz.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.trunc.f16(half %x, metadata !"fpexcept.strict") #0 + ret half %val +} + +define half @ldexp_f16(half %x, i32 %y) #0 { +; CHECK-LABEL: ldexp_f16: +; CHECK: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bl ldexpf +; CHECK-NEXT: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: pop {r11, pc} + %val = call half @llvm.experimental.constrained.ldexp.f16.i32(half %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define i32 @fcmp_olt_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_olt_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwmi r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_ole_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_ole_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwls r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_ogt_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_ogt_f16: +; CHECK: vcmp.f16 s0, s1 +; 
CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_oge_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_oge_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwge r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_oeq_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_oeq_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movweq r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_one_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_one_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwmi r0, #1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_ult_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_ult_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwlt r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_ule_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_ule_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwle r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_ugt_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_ugt_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_uge_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_uge_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwpl r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_ueq_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmp_ueq_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movweq r0, #1 +; CHECK-NEXT: movwvs r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmp_une_f16(half %a, half %b) #0 { +; CHECK-LABEL: 
fcmp_une_f16: +; CHECK: vcmp.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwne r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_olt_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_olt_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwmi r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_ole_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_ole_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwls r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_ogt_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_ogt_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_oge_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_oge_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwge r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_oeq_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_oeq_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movweq r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_one_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_one_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwmi r0, #1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_ult_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_ult_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwlt r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_ule_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_ule_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwle r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 
@fcmps_ugt_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_ugt_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_uge_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_uge_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwpl r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_ueq_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_ueq_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movweq r0, #1 +; CHECK-NEXT: movwvs r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @fcmps_une_f16(half %a, half %b) #0 { +; CHECK-LABEL: fcmps_une_f16: +; CHECK: vcmpe.f16 s0, s1 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwne r0, #1 +; CHECK-NEXT: bx lr + %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") #0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + + +; Intrinsics to convert between floating-point types + +define half @fptrunc_f16_f32(float %x) #0 { +; CHECK-LABEL: fptrunc_f16_f32: +; CHECK: vcvtb.f16.f32 s0, s0 +; CHECK-NEXT: bx lr + %val = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret half %val +} + +define float @fpext_f32_f16(half %x) #0 { +; CHECK-LABEL: fpext_f32_f16: +; CHECK: vcvtb.f32.f16 s0, s0 +; CHECK-NEXT: bx lr + %val = call float @llvm.experimental.constrained.fpext.f32.f16(half %x, metadata !"fpexcept.strict") #0 + ret float %val +} + + +attributes #0 = { strictfp } + +declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata) +declare half @llvm.experimental.constrained.fsub.f16(half, half, metadata, metadata) +declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata) +declare half @llvm.experimental.constrained.fdiv.f16(half, half, metadata, metadata) +declare half @llvm.experimental.constrained.frem.f16(half, half, metadata, metadata) +declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata) +declare half @llvm.experimental.constrained.fmuladd.f16(half, half, half, metadata, metadata) +declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata) +declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata) +declare i64 @llvm.experimental.constrained.fptosi.i64.f16(half, metadata) +declare i64 @llvm.experimental.constrained.fptoui.i64.f16(half, metadata) +declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata) +declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata) +declare half @llvm.experimental.constrained.sitofp.f16.i64(i64, metadata, metadata) +declare half @llvm.experimental.constrained.uitofp.f16.i64(i64, metadata, metadata) +declare half 
@llvm.experimental.constrained.sitofp.f16.i128(i128, metadata, metadata) +declare half @llvm.experimental.constrained.uitofp.f16.i128(i128, metadata, metadata) +declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.powi.f16(half, i32, metadata, metadata) +declare half @llvm.experimental.constrained.sin.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.cos.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.tan.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.pow.f16(half, half, metadata, metadata) +declare half @llvm.experimental.constrained.log.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.log10.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.log2.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.exp.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.exp2.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata) +declare i32 @llvm.experimental.constrained.lrint.i32.f16(half, metadata, metadata) +declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.maxnum.f16(half, half, metadata) +declare half @llvm.experimental.constrained.minnum.f16(half, half, metadata) +declare half @llvm.experimental.constrained.ceil.f16(half, metadata) +declare half @llvm.experimental.constrained.floor.f16(half, metadata) +declare i32 @llvm.experimental.constrained.lround.i32.f16(half, metadata) +declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata) +declare half @llvm.experimental.constrained.round.f16(half, metadata) +declare half @llvm.experimental.constrained.roundeven.f16(half, metadata) +declare half @llvm.experimental.constrained.trunc.f16(half, metadata) +declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata) +declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata) + +declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata) +declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata) + + declare half @llvm.sqrt.f16(half %a) declare half @llvm.powi.f16.i32(half %a, i32 %b) declare half @llvm.sin.f16(half %a) diff --git a/llvm/test/CodeGen/ARM/strict-fp-func.ll b/llvm/test/CodeGen/ARM/strict-fp-func.ll new file mode 100644 index 0000000..39bb2b4 --- /dev/null +++ b/llvm/test/CodeGen/ARM/strict-fp-func.ll @@ -0,0 +1,13 @@ +; RUN: llc -mtriple arm-none-eabi -stop-after=finalize-isel %s -o - | FileCheck %s + +define float @func_02(float %x, float %y) strictfp nounwind { + %call = call float @func_01(float %x) strictfp + %res = call float @llvm.experimental.constrained.fadd.f32(float %call, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") strictfp + ret float %res +} +; CHECK-LABEL: name: func_02 +; CHECK: BL @func_01, {{.*}}, implicit-def $fpscr_rm + + +declare float @func_01(float) +declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) diff --git a/llvm/test/CodeGen/ARM/strict-fp-int-promote.ll b/llvm/test/CodeGen/ARM/strict-fp-int-promote.ll new file mode 100644 index 0000000..6e5b589 --- /dev/null +++ b/llvm/test/CodeGen/ARM/strict-fp-int-promote.ll @@ -0,0 +1,159 @@ +; NOTE: Assertions 
have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple armv7-- -mattr=+vfp4 -O0 -o - %s | FileCheck %s +; RUN: llc -mtriple armv7-- -mattr=+vfp4 -O3 -o - %s | FileCheck %s --check-prefix=CHECK-O3 + +declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) +declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata) +declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) +declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata) + +define i32 @test(i32 %a, i16 %b) #0 { +; CHECK-LABEL: test: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: mov r2, r0 +; CHECK-NEXT: sxth r0, r1 +; CHECK-NEXT: movw r1, #0 +; CHECK-NEXT: movt r1, #17200 +; CHECK-NEXT: str r1, [sp, #4] +; CHECK-NEXT: eor r2, r2, #-2147483648 +; CHECK-NEXT: str r2, [sp] +; CHECK-NEXT: vldr d16, [sp] +; CHECK-NEXT: vldr d17, .LCPI0_0 +; CHECK-NEXT: vsub.f64 d16, d16, d17 +; CHECK-NEXT: vcvt.f32.f64 s0, d16 +; CHECK-NEXT: str r1, [sp, #12] +; CHECK-NEXT: eor r0, r0, #-2147483648 +; CHECK-NEXT: str r0, [sp, #8] +; CHECK-NEXT: vldr d16, [sp, #8] +; CHECK-NEXT: vsub.f64 d16, d16, d17 +; CHECK-NEXT: vcvt.f32.f64 s2, d16 +; CHECK-NEXT: vcmp.f32 s0, s2 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: movweq r0, #1 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI0_0: +; CHECK-NEXT: .long 2147483648 @ double 4503601774854144 +; CHECK-NEXT: .long 1127219200 +; +; CHECK-O3-LABEL: test: +; CHECK-O3: @ %bb.0: @ %entry +; CHECK-O3-NEXT: sub sp, sp, #16 +; CHECK-O3-NEXT: sxth r1, r1 +; CHECK-O3-NEXT: movw r2, #0 +; CHECK-O3-NEXT: movt r2, #17200 +; CHECK-O3-NEXT: str r2, [sp, #4] +; CHECK-O3-NEXT: eor r0, r0, #-2147483648 +; CHECK-O3-NEXT: str r0, [sp] +; CHECK-O3-NEXT: vldr d16, [sp] +; CHECK-O3-NEXT: vldr d17, .LCPI0_0 +; CHECK-O3-NEXT: vsub.f64 d16, d16, d17 +; CHECK-O3-NEXT: vcvt.f32.f64 s0, d16 +; CHECK-O3-NEXT: str r2, [sp, #12] +; CHECK-O3-NEXT: eor r0, r1, #-2147483648 +; CHECK-O3-NEXT: str r0, [sp, #8] +; CHECK-O3-NEXT: vldr d16, [sp, #8] +; CHECK-O3-NEXT: vsub.f64 d16, d16, d17 +; CHECK-O3-NEXT: vcvt.f32.f64 s2, d16 +; CHECK-O3-NEXT: vcmp.f32 s0, s2 +; CHECK-O3-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-O3-NEXT: mov r0, #0 +; CHECK-O3-NEXT: movweq r0, #1 +; CHECK-O3-NEXT: add sp, sp, #16 +; CHECK-O3-NEXT: bx lr +; CHECK-O3-NEXT: .p2align 3 +; CHECK-O3-NEXT: @ %bb.1: +; CHECK-O3-NEXT: .LCPI0_0: +; CHECK-O3-NEXT: .long 2147483648 @ double 4503601774854144 +; CHECK-O3-NEXT: .long 1127219200 +entry: + %conv = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #1 + %conv1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #1 + %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %conv, float %conv1, metadata !"oeq", metadata !"fpexcept.strict") #1 + %conv2 = zext i1 %cmp to i32 + ret i32 %conv2 +} + +define i32 @test2(i32 %a, i16 %b) #0 { +; CHECK-LABEL: test2: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: mov r2, r0 +; CHECK-NEXT: uxth r0, r1 +; CHECK-NEXT: movw r1, #0 +; CHECK-NEXT: movt r1, #17200 +; CHECK-NEXT: str r1, [sp, #4] +; CHECK-NEXT: eor r2, r2, #-2147483648 +; CHECK-NEXT: str r2, [sp] +; CHECK-NEXT: vldr d16, [sp] +; CHECK-NEXT: vldr d17, .LCPI1_0 +; CHECK-NEXT: vsub.f64 
d16, d16, d17 +; CHECK-NEXT: vcvt.f32.f64 s0, d16 +; CHECK-NEXT: str r1, [sp, #12] +; CHECK-NEXT: str r0, [sp, #8] +; CHECK-NEXT: vldr d16, [sp, #8] +; CHECK-NEXT: vldr d17, .LCPI1_1 +; CHECK-NEXT: vsub.f64 d16, d16, d17 +; CHECK-NEXT: vcvt.f32.f64 s2, d16 +; CHECK-NEXT: vcmp.f32 s0, s2 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: movweq r0, #1 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI1_0: +; CHECK-NEXT: .long 2147483648 @ double 4503601774854144 +; CHECK-NEXT: .long 1127219200 +; CHECK-NEXT: .LCPI1_1: +; CHECK-NEXT: .long 0 @ double 4503599627370496 +; CHECK-NEXT: .long 1127219200 +; +; CHECK-O3-LABEL: test2: +; CHECK-O3: @ %bb.0: @ %entry +; CHECK-O3-NEXT: sub sp, sp, #16 +; CHECK-O3-NEXT: uxth r1, r1 +; CHECK-O3-NEXT: movw r2, #0 +; CHECK-O3-NEXT: movt r2, #17200 +; CHECK-O3-NEXT: str r2, [sp, #4] +; CHECK-O3-NEXT: eor r0, r0, #-2147483648 +; CHECK-O3-NEXT: str r0, [sp] +; CHECK-O3-NEXT: vldr d16, [sp] +; CHECK-O3-NEXT: vldr d17, .LCPI1_0 +; CHECK-O3-NEXT: vsub.f64 d16, d16, d17 +; CHECK-O3-NEXT: vcvt.f32.f64 s0, d16 +; CHECK-O3-NEXT: str r2, [sp, #12] +; CHECK-O3-NEXT: str r1, [sp, #8] +; CHECK-O3-NEXT: vldr d16, [sp, #8] +; CHECK-O3-NEXT: vldr d17, .LCPI1_1 +; CHECK-O3-NEXT: vsub.f64 d16, d16, d17 +; CHECK-O3-NEXT: vcvt.f32.f64 s2, d16 +; CHECK-O3-NEXT: vcmp.f32 s0, s2 +; CHECK-O3-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-O3-NEXT: mov r0, #0 +; CHECK-O3-NEXT: movweq r0, #1 +; CHECK-O3-NEXT: add sp, sp, #16 +; CHECK-O3-NEXT: bx lr +; CHECK-O3-NEXT: .p2align 3 +; CHECK-O3-NEXT: @ %bb.1: +; CHECK-O3-NEXT: .LCPI1_0: +; CHECK-O3-NEXT: .long 2147483648 @ double 4503601774854144 +; CHECK-O3-NEXT: .long 1127219200 +; CHECK-O3-NEXT: .LCPI1_1: +; CHECK-O3-NEXT: .long 0 @ double 4503599627370496 +; CHECK-O3-NEXT: .long 1127219200 +entry: + %conv = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #1 + %conv1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #1 + %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %conv, float %conv1, metadata !"oeq", metadata !"fpexcept.strict") #1 + %conv2 = zext i1 %cmp to i32 + ret i32 %conv2 +} + +attributes #0 = { strictfp noinline optnone } +attributes #1 = { strictfp } diff --git a/llvm/test/CodeGen/ARM/strict-fp-ops.ll b/llvm/test/CodeGen/ARM/strict-fp-ops.ll new file mode 100644 index 0000000..608ab07 --- /dev/null +++ b/llvm/test/CodeGen/ARM/strict-fp-ops.ll @@ -0,0 +1,202 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple armv7-- -mattr=+vfp4 %s -o - | FileCheck %s + + +; Div whose result is unused should be removed unless we have strict exceptions + +define void @unused_div(float %x, float %y) { +; CHECK-LABEL: unused_div: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: bx lr +entry: + %add = fdiv float %x, %y + ret void +} + +define void @unused_div_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: unused_div_fpexcept_strict: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov s0, r1 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vdiv.f32 s0, s2, s0 +; CHECK-NEXT: bx lr +entry: + %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret void +} + +define void @unused_div_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: 
unused_div_round_dynamic: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: bx lr +entry: + %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 + ret void +} + + +; Machine CSE should eliminate the second add unless we have strict exceptions + +define float @add_twice(float %x, float %y, i32 %n) { +; CHECK-LABEL: add_twice: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov s0, r1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vadd.f32 s0, s2, s0 +; CHECK-NEXT: vmul.f32 s2, s0, s0 +; CHECK-NEXT: vmoveq.f32 s2, s0 +; CHECK-NEXT: vmov r0, s2 +; CHECK-NEXT: bx lr +entry: + %add = fadd float %x, %y + %tobool.not = icmp eq i32 %n, 0 + br i1 %tobool.not, label %if.end, label %if.then + +if.then: + %add1 = fadd float %x, %y + %mul = fmul float %add, %add1 + br label %if.end + +if.end: + %a.0 = phi float [ %mul, %if.then ], [ %add, %entry ] + ret float %a.0 +} + +define float @add_twice_fpexcept_strict(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_fpexcept_strict: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov s2, r1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: vmov s4, r0 +; CHECK-NEXT: vadd.f32 s0, s4, s2 +; CHECK-NEXT: vaddne.f32 s2, s4, s2 +; CHECK-NEXT: vmulne.f32 s0, s0, s2 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: bx lr +entry: + %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %tobool.not = icmp eq i32 %n, 0 + br i1 %tobool.not, label %if.end, label %if.then + +if.then: + %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %mul = call float @llvm.experimental.constrained.fmul.f32(float %add, float %add1, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + br label %if.end + +if.end: + %a.0 = phi float [ %mul, %if.then ], [ %add, %entry ] + ret float %a.0 +} + +define float @add_twice_round_dynamic(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_round_dynamic: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov s0, r1 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vadd.f32 s0, s2, s0 +; CHECK-NEXT: vmulne.f32 s0, s0, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: bx lr +entry: + %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 + %tobool.not = icmp eq i32 %n, 0 + br i1 %tobool.not, label %if.end, label %if.then + +if.then: + %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 + %mul = call float @llvm.experimental.constrained.fmul.f32(float %add, float %add1, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 + br label %if.end + +if.end: + %a.0 = phi float [ %mul, %if.then ], [ %add, %entry ] + ret float %a.0 +} + +; Two adds separated by llvm.set.rounding should be preserved when rounding is +; dynamic (as they may give different results) or when we have strict exceptions +; (the llvm.set.rounding is irrelevant, but both could trap). 
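+;
+; (A gloss on the FPSCR constants in the checks below, assuming the standard
+; ARM FPSCR layout: the rounding mode occupies FPSCR bits 23:22, and 12582912
+; is 0xC00000, a mask of exactly those two bits. llvm.set.rounding takes the
+; llvm.get.rounding encoding, so set.rounding(i32 0) selects round-toward-zero,
+; RMode 0b11, via the orr, and set.rounding(i32 1) restores round-to-nearest,
+; RMode 0b00, via the bic.)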
+ +define float @set_rounding(float %x, float %y) { +; CHECK-LABEL: set_rounding: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmrs r2, fpscr +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vmov s0, r1 +; CHECK-NEXT: vadd.f32 s0, s2, s0 +; CHECK-NEXT: vsub.f32 s0, s0, s0 +; CHECK-NEXT: orr r0, r2, #12582912 +; CHECK-NEXT: vmsr fpscr, r0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: vmrs r1, fpscr +; CHECK-NEXT: bic r1, r1, #12582912 +; CHECK-NEXT: vmsr fpscr, r1 +; CHECK-NEXT: bx lr +entry: + %add1 = fadd float %x, %y + call void @llvm.set.rounding(i32 0) + %add2 = fadd float %x, %y + call void @llvm.set.rounding(i32 1) + %sub = fsub float %add1, %add2 + ret float %sub +} + +define float @set_rounding_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_fpexcept_strict: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov s0, r1 +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vadd.f32 s4, s2, s0 +; CHECK-NEXT: vmrs r0, fpscr +; CHECK-NEXT: orr r0, r0, #12582912 +; CHECK-NEXT: vmsr fpscr, r0 +; CHECK-NEXT: vadd.f32 s0, s2, s0 +; CHECK-NEXT: vmrs r0, fpscr +; CHECK-NEXT: bic r0, r0, #12582912 +; CHECK-NEXT: vmsr fpscr, r0 +; CHECK-NEXT: vsub.f32 s0, s4, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: bx lr +entry: + %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + call void @llvm.set.rounding(i32 0) #0 + %add2 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + call void @llvm.set.rounding(i32 1) #0 + %sub = call float @llvm.experimental.constrained.fsub.f32(float %add1, float %add2, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %sub +} + +define float @set_rounding_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_round_dynamic: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov s2, r0 +; CHECK-NEXT: vmrs r0, fpscr +; CHECK-NEXT: vmov s0, r1 +; CHECK-NEXT: vadd.f32 s4, s2, s0 +; CHECK-NEXT: orr r0, r0, #12582912 +; CHECK-NEXT: vmsr fpscr, r0 +; CHECK-NEXT: vmrs r0, fpscr +; CHECK-NEXT: vadd.f32 s0, s2, s0 +; CHECK-NEXT: bic r0, r0, #12582912 +; CHECK-NEXT: vmsr fpscr, r0 +; CHECK-NEXT: vsub.f32 s0, s4, s0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: bx lr +entry: + %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 + call void @llvm.set.rounding(i32 0) #0 + %add2 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 + call void @llvm.set.rounding(i32 1) #0 + %sub = call float @llvm.experimental.constrained.fsub.f32(float %add1, float %add2, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 + ret float %sub +} + +declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) +declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) +declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) +declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) +declare i32 @llvm.get.rounding() +declare void @llvm.set.rounding(i32) + +attributes #0 = { strictfp } diff --git a/llvm/test/CodeGen/ARM/strictfp_f16_abi_promote.ll b/llvm/test/CodeGen/ARM/strictfp_f16_abi_promote.ll new file mode 100644 index 0000000..5906c79 --- /dev/null +++ b/llvm/test/CodeGen/ARM/strictfp_f16_abi_promote.ll @@ -0,0 +1,270 @@ +; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=armv7-- < %s | FileCheck -check-prefix=NOFP16 %s + +declare void @f16_user(half) +declare half @f16_result() + +declare void @v2f16_user(<2 x half>) +declare <2 x half> @v2f16_result() + +declare void @v4f16_user(<4 x half>) +declare <4 x half> @v4f16_result() + +declare void @v8f16_user(<8 x half>) +declare <8 x half> @v8f16_result() + +define void @f16_arg(half %arg, ptr %ptr) #0 { +; NOFP16-LABEL: f16_arg: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r4, lr} +; NOFP16-NEXT: uxth r0, r0 +; NOFP16-NEXT: mov r4, r1 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: str r0, [r4] +; NOFP16-NEXT: pop {r4, pc} + %fpext = call float @llvm.experimental.constrained.fpext.f32.f16(half %arg, metadata !"fpexcept.strict") + store float %fpext, ptr %ptr + ret void +} + +define void @v2f16_arg(<2 x half> %arg, ptr %ptr) #0 { +; NOFP16-LABEL: v2f16_arg: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r4, r5, r11, lr} +; NOFP16-NEXT: vpush {d8} +; NOFP16-NEXT: mov r5, r0 +; NOFP16-NEXT: uxth r0, r1 +; NOFP16-NEXT: mov r4, r2 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: uxth r1, r5 +; NOFP16-NEXT: vmov s17, r0 +; NOFP16-NEXT: mov r0, r1 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: vmov s16, r0 +; NOFP16-NEXT: vstr d8, [r4] +; NOFP16-NEXT: vpop {d8} +; NOFP16-NEXT: pop {r4, r5, r11, pc} + %fpext = call <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half> %arg, metadata !"fpexcept.strict") + store <2 x float> %fpext, ptr %ptr + ret void +} + +define void @v3f16_arg(<3 x half> %arg, ptr %ptr) #0 { +; NOFP16-LABEL: v3f16_arg: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r4, r5, r6, lr} +; NOFP16-NEXT: vpush {d8} +; NOFP16-NEXT: mov r6, r0 +; NOFP16-NEXT: uxth r0, r1 +; NOFP16-NEXT: mov r4, r3 +; NOFP16-NEXT: mov r5, r2 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: uxth r1, r6 +; NOFP16-NEXT: vmov s17, r0 +; NOFP16-NEXT: mov r0, r1 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: vmov s16, r0 +; NOFP16-NEXT: uxth r0, r5 +; NOFP16-NEXT: vst1.32 {d8}, [r4:64]! 
+; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: str r0, [r4] +; NOFP16-NEXT: vpop {d8} +; NOFP16-NEXT: pop {r4, r5, r6, pc} + %fpext = call <3 x float> @llvm.experimental.constrained.fpext.v3f32.v3f16(<3 x half> %arg, metadata !"fpexcept.strict") + store <3 x float> %fpext, ptr %ptr + ret void +} + +define void @v4f16_arg(<4 x half> %arg, ptr %ptr) #0 { +; NOFP16-LABEL: v4f16_arg: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r4, r5, r6, r7, r11, lr} +; NOFP16-NEXT: vpush {d8, d9} +; NOFP16-NEXT: mov r6, r0 +; NOFP16-NEXT: uxth r0, r1 +; NOFP16-NEXT: mov r4, r3 +; NOFP16-NEXT: mov r5, r2 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: mov r7, r0 +; NOFP16-NEXT: uxth r0, r4 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: vmov s19, r0 +; NOFP16-NEXT: uxth r0, r5 +; NOFP16-NEXT: ldr r4, [sp, #40] +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: vmov s18, r0 +; NOFP16-NEXT: uxth r0, r6 +; NOFP16-NEXT: vmov s17, r7 +; NOFP16-NEXT: bl __gnu_h2f_ieee +; NOFP16-NEXT: vmov s16, r0 +; NOFP16-NEXT: vst1.64 {d8, d9}, [r4] +; NOFP16-NEXT: vpop {d8, d9} +; NOFP16-NEXT: pop {r4, r5, r6, r7, r11, pc} + %fpext = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %arg, metadata !"fpexcept.strict") + store <4 x float> %fpext, ptr %ptr + ret void +} + + define half @f16_return(float %arg) #0 { +; NOFP16-LABEL: f16_return: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r11, lr} +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: pop {r11, pc} + %fptrunc = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %arg, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret half %fptrunc + } + + define <2 x half> @v2f16_return(<2 x float> %arg) #0 { +; NOFP16-LABEL: v2f16_return: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r11, lr} +; NOFP16-NEXT: vpush {d8} +; NOFP16-NEXT: sub sp, sp, #8 +; NOFP16-NEXT: vmov d8, r0, r1 +; NOFP16-NEXT: vmov r0, s17 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: vmov r1, s16 +; NOFP16-NEXT: strh r0, [sp, #6] +; NOFP16-NEXT: mov r0, r1 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: strh r0, [sp, #4] +; NOFP16-NEXT: add r0, sp, #4 +; NOFP16-NEXT: vld1.32 {d16[0]}, [r0:32] +; NOFP16-NEXT: vmovl.u16 q8, d16 +; NOFP16-NEXT: vmov.32 r0, d16[0] +; NOFP16-NEXT: vmov.32 r1, d16[1] +; NOFP16-NEXT: add sp, sp, #8 +; NOFP16-NEXT: vpop {d8} +; NOFP16-NEXT: pop {r11, pc} + %fptrunc = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <2 x half> %fptrunc + } + + define <3 x half> @v3f16_return(<3 x float> %arg) #0 { +; NOFP16-LABEL: v3f16_return: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r4, r5, r6, lr} +; NOFP16-NEXT: vmov d1, r2, r3 +; NOFP16-NEXT: mov r5, r0 +; NOFP16-NEXT: vmov d0, r0, r1 +; NOFP16-NEXT: mov r4, r1 +; NOFP16-NEXT: vmov r0, s2 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: uxth r6, r0 +; NOFP16-NEXT: mov r0, r4 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: mov r4, r0 +; NOFP16-NEXT: mov r0, r5 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: pkhbt r0, r0, r4, lsl #16 +; NOFP16-NEXT: vmov d16, r0, r6 +; NOFP16-NEXT: vmov.u16 r0, d16[0] +; NOFP16-NEXT: vmov.u16 r1, d16[1] +; NOFP16-NEXT: vmov.u16 r2, d16[2] +; NOFP16-NEXT: pop {r4, r5, r6, pc} + %fptrunc = call <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f32(<3 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <3 x half> %fptrunc + } + + define <4 x half> @v4f16_return(<4 x float> %arg) #0 { +; NOFP16-LABEL: v4f16_return: +; NOFP16: @ %bb.0: 
+; NOFP16-NEXT: push {r4, r5, r11, lr} +; NOFP16-NEXT: vpush {d8, d9} +; NOFP16-NEXT: vmov d8, r2, r3 +; NOFP16-NEXT: vmov d9, r0, r1 +; NOFP16-NEXT: vmov r2, s17 +; NOFP16-NEXT: mov r0, r2 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: mov r4, r0 +; NOFP16-NEXT: vmov r0, s16 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: vmov r1, s19 +; NOFP16-NEXT: pkhbt r5, r0, r4, lsl #16 +; NOFP16-NEXT: mov r0, r1 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: mov r4, r0 +; NOFP16-NEXT: vmov r0, s18 +; NOFP16-NEXT: bl __gnu_f2h_ieee +; NOFP16-NEXT: pkhbt r0, r0, r4, lsl #16 +; NOFP16-NEXT: vmov d16, r0, r5 +; NOFP16-NEXT: vmov.u16 r0, d16[0] +; NOFP16-NEXT: vmov.u16 r1, d16[1] +; NOFP16-NEXT: vmov.u16 r2, d16[2] +; NOFP16-NEXT: vmov.u16 r3, d16[3] +; NOFP16-NEXT: vpop {d8, d9} +; NOFP16-NEXT: pop {r4, r5, r11, pc} + %fptrunc = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <4 x half> %fptrunc + } + +define void @outgoing_v4f16_return(ptr %ptr) #0 { +; NOFP16-LABEL: outgoing_v4f16_return: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r4, lr} +; NOFP16-NEXT: mov r4, r0 +; NOFP16-NEXT: bl v4f16_result +; NOFP16-NEXT: strh r3, [r4, #6] +; NOFP16-NEXT: strh r2, [r4, #4] +; NOFP16-NEXT: strh r1, [r4, #2] +; NOFP16-NEXT: strh r0, [r4] +; NOFP16-NEXT: pop {r4, pc} + %val = call <4 x half> @v4f16_result() #0 + store <4 x half> %val, ptr %ptr + ret void +} + +define void @outgoing_v8f16_return(ptr %ptr) #0 { +; NOFP16-LABEL: outgoing_v8f16_return: +; NOFP16: @ %bb.0: +; NOFP16-NEXT: push {r4, r10, r11, lr} +; NOFP16-NEXT: add r11, sp, #8 +; NOFP16-NEXT: sub sp, sp, #16 +; NOFP16-NEXT: bfc sp, #0, #4 +; NOFP16-NEXT: mov r4, r0 +; NOFP16-NEXT: mov r0, sp +; NOFP16-NEXT: bl v8f16_result +; NOFP16-NEXT: ldm sp, {r0, r1, r2, r3} +; NOFP16-NEXT: stm r4, {r0, r1, r2, r3} +; NOFP16-NEXT: sub sp, r11, #8 +; NOFP16-NEXT: pop {r4, r10, r11, pc} + %val = call <8 x half> @v8f16_result() #0 + store <8 x half> %val, ptr %ptr + ret void +} + +define half @call_split_type_used_outside_block_v8f16() #0 { +; NOFP16-LABEL: call_split_type_used_outside_block_v8f16: +; NOFP16: @ %bb.0: @ %bb0 +; NOFP16-NEXT: push {r4, r10, r11, lr} +; NOFP16-NEXT: add r11, sp, #8 +; NOFP16-NEXT: sub sp, sp, #16 +; NOFP16-NEXT: bfc sp, #0, #4 +; NOFP16-NEXT: mov r4, sp +; NOFP16-NEXT: mov r0, r4 +; NOFP16-NEXT: bl v8f16_result +; NOFP16-NEXT: vld1.32 {d16[0]}, [r4:32] +; NOFP16-NEXT: vmov.u16 r0, d16[0] +; NOFP16-NEXT: sub sp, r11, #8 +; NOFP16-NEXT: pop {r4, r10, r11, pc} +bb0: + %split.ret.type = call <8 x half> @v8f16_result() #0 + br label %bb1 + +bb1: + %extract = extractelement <8 x half> %split.ret.type, i32 0 + ret half %extract +} + +declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata) #0 +declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half>, metadata) #0 +declare <3 x float> @llvm.experimental.constrained.fpext.v3f32.v3f16(<3 x half>, metadata) #0 +declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half>, metadata) #0 + +declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata) #0 +declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float>, metadata, metadata) #0 +declare <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f32(<3 x float>, metadata, metadata) #0 +declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float>, metadata, metadata) #0 + +attributes #0 = { strictfp } diff 
--git a/llvm/test/CodeGen/DirectX/Metadata/loop-md-errs.ll b/llvm/test/CodeGen/DirectX/Metadata/loop-md-errs.ll new file mode 100644 index 0000000..fbe4653 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/Metadata/loop-md-errs.ll @@ -0,0 +1,113 @@ +; RUN: split-file %s %t +; RUN: not opt -S --dxil-translate-metadata %t/args.ll 2>&1 | FileCheck %t/args.ll +; RUN: not opt -S --dxil-translate-metadata %t/bad-count.ll 2>&1 | FileCheck %t/bad-count.ll +; RUN: not opt -S --dxil-translate-metadata %t/invalid-disable.ll 2>&1 | FileCheck %t/invalid-disable.ll +; RUN: not opt -S --dxil-translate-metadata %t/invalid-full.ll 2>&1 | FileCheck %t/invalid-full.ll + +; Test that loop metadata is validated as with the DXIL validator + +;--- args.ll + +; CHECK: Invalid "llvm.loop" metadata: Provided conflicting hints + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + br label %loop.header, !llvm.loop !1 + +exit: + ret void +} + +!1 = !{!1, !2, !3} ; conflicting args +!2 = !{!"llvm.loop.unroll.full"} +!3 = !{!"llvm.loop.unroll.disable"} + +;--- bad-count.ll + +; CHECK: "llvm.loop.unroll.count" must have 2 operands and the second must be a constant integer + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + br label %loop.header, !llvm.loop !1 + +exit: + ret void +} + +!1 = !{!1, !2} +!2 = !{!"llvm.loop.unroll.count", !"not an int"} ; invalid count parameters + +;--- invalid-disable.ll + +; CHECK: Invalid "llvm.loop" metadata: "llvm.loop.unroll.disable" and "llvm.loop.unroll.full" must be provided as a single operand + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + br label %loop.header, !llvm.loop !1 + +exit: + ret void +} + +!1 = !{!1, !2} +!2 = !{!"llvm.loop.unroll.disable", i32 0} ; invalid second operand + + +;--- invalid-full.ll + +; CHECK: Invalid "llvm.loop" metadata: "llvm.loop.unroll.disable" and "llvm.loop.unroll.full" must be provided as a single operand + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + br label %loop.header, !llvm.loop !1 + +exit: + ret void +} + +!1 = !{!1, !2} +!2 = !{!"llvm.loop.unroll.full", i32 0} ; invalid second operand diff --git a/llvm/test/CodeGen/DirectX/Metadata/loop-md-stripped.ll b/llvm/test/CodeGen/DirectX/Metadata/loop-md-stripped.ll new file mode 100644 index 0000000..09d8aec --- /dev/null +++ b/llvm/test/CodeGen/DirectX/Metadata/loop-md-stripped.ll @@ -0,0 +1,58 @@ +; RUN: split-file %s %t +; RUN: opt -S --dxil-translate-metadata %t/not-distinct.ll 2>&1 | FileCheck %t/not-distinct.ll +; RUN: opt -S 
--dxil-translate-metadata %t/not-md.ll 2>&1 | FileCheck %t/not-md.ll + +; Test that DXIL incompatible loop metadata is stripped + +;--- not-distinct.ll + +; Ensure it is stripped because it is not provided a distinct loop parent +; CHECK-NOT: {!"llvm.loop.unroll.disable"} + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + br label %loop.header, !llvm.loop !1 + +exit: + ret void +} + +!1 = !{!"llvm.loop.unroll.disable"} ; first node must be a distinct self-reference + + +;--- not-md.ll + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + ; CHECK: br label %loop.header, !llvm.loop ![[#LOOP_MD:]] + br label %loop.header, !llvm.loop !1 + +exit: + ret void +} + +; CHECK: ![[#LOOP_MD:]] = distinct !{![[#LOOP_MD]]} + +!1 = !{!1, i32 0} ; second operand is not a metadata node diff --git a/llvm/test/CodeGen/DirectX/Metadata/loop-md-valid.ll b/llvm/test/CodeGen/DirectX/Metadata/loop-md-valid.ll new file mode 100644 index 0000000..a189c0e --- /dev/null +++ b/llvm/test/CodeGen/DirectX/Metadata/loop-md-valid.ll @@ -0,0 +1,95 @@ +; RUN: split-file %s %t +; RUN: opt -S --dxil-translate-metadata %t/count.ll | FileCheck %t/count.ll +; RUN: opt -S --dxil-translate-metadata %t/disable.ll | FileCheck %t/disable.ll +; RUN: opt -S --dxil-translate-metadata %t/full.ll | FileCheck %t/full.ll + +;--- count.ll + +; Test that we collapse a self-referential chain and allow a unroll.count hint + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + ; CHECK: br label %loop.header, !llvm.loop ![[#LOOP_MD:]] + br label %loop.header, !llvm.loop !0 + +exit: + ret void +} + +; CHECK: ![[#LOOP_MD]] = distinct !{![[#LOOP_MD]], ![[#COUNT:]]} +; CHECK: ![[#COUNT]] = !{!"llvm.loop.unroll.count", i6 4} + +!0 = !{!0, !1} +!1 = !{!1, !2} +!2 = !{!"llvm.loop.unroll.count", i6 4} + +;--- disable.ll + +; Test that we allow a disable hint + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label %loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + ; CHECK: br label %loop.header, !llvm.loop ![[#LOOP_MD:]] + br label %loop.header, !llvm.loop !0 + +exit: + ret void +} + +; CHECK: ![[#LOOP_MD]] = distinct !{![[#LOOP_MD]], ![[#DISABLE:]]} +; CHECK: ![[#DISABLE]] = !{!"llvm.loop.unroll.disable"} + +!0 = !{!0, !1} +!1 = !{!"llvm.loop.unroll.disable"} + +;--- full.ll + +; Test that we allow a full hint + +target triple = "dxilv1.0-unknown-shadermodel6.0-library" + +define void @example_loop(i32 %n) { +entry: + br label %loop.header + +loop.header: + %i = phi i32 [ 0, %entry ], [ %i.next, %loop.body ] + %cmp = icmp slt i32 %i, %n + br i1 %cmp, label 
%loop.body, label %exit + +loop.body: + %i.next = add nsw i32 %i, 1 + ; CHECK: br label %loop.header, !llvm.loop ![[#LOOP_MD:]] + br label %loop.header, !llvm.loop !0 + +exit: + ret void +} + +; CHECK: ![[#LOOP_MD]] = distinct !{![[#LOOP_MD]], ![[#FULL:]]} +; CHECK: ![[#FULL]] = !{!"llvm.loop.unroll.full"} + +!0 = !{!0, !1} +!1 = !{!"llvm.loop.unroll.full"} diff --git a/llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll b/llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll index 9697d438..5740ee1 100644 --- a/llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll +++ b/llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll @@ -1,4 +1,4 @@ -; RUN: not opt -S -S -dxil-translate-metadata %s 2>&1 | FileCheck %s +; RUN: not opt -S -dxil-translate-metadata %s 2>&1 | FileCheck %s target triple = "dxil-pc-shadermodel6.8-compute" ; CHECK: Non-library shader: One and only one entry expected diff --git a/llvm/test/CodeGen/DirectX/metadata-stripping.ll b/llvm/test/CodeGen/DirectX/metadata-stripping.ll index 531ab6c..53716ff 100644 --- a/llvm/test/CodeGen/DirectX/metadata-stripping.ll +++ b/llvm/test/CodeGen/DirectX/metadata-stripping.ll @@ -14,7 +14,7 @@ entry: %cmp.i = icmp ult i32 1, 2 ; Ensure that the !llvm.loop metadata node gets dropped. - ; CHECK: br i1 %cmp.i, label %_Z4mainDv3_j.exit, label %_Z4mainDv3_j.exit{{$}} + ; CHECK: br i1 %cmp.i, label %_Z4mainDv3_j.exit, label %_Z4mainDv3_j.exit, !llvm.loop [[LOOPMD:![0-9]+]] br i1 %cmp.i, label %_Z4mainDv3_j.exit, label %_Z4mainDv3_j.exit, !llvm.loop !0 _Z4mainDv3_j.exit: ; preds = %for.body.i, %entry @@ -25,7 +25,8 @@ _Z4mainDv3_j.exit: ; preds = %for.body.i, %entry ; No more metadata should be necessary, the rest (the current 0 and 1) ; should be removed. ; CHECK-NOT: !{!"llvm.loop.mustprogress"} -; CHECK: [[RANGEMD]] = !{i32 1, i32 5} +; CHECK-DAG: [[RANGEMD]] = !{i32 1, i32 5} +; CHECK-DAG: [[LOOPMD]] = distinct !{[[LOOPMD]]} ; CHECK-NOT: !{!"llvm.loop.mustprogress"} !0 = distinct !{!0, !1} !1 = !{!"llvm.loop.mustprogress"} diff --git a/llvm/test/CodeGen/DirectX/strip-module-md.ll b/llvm/test/CodeGen/DirectX/strip-module-md.ll new file mode 100644 index 0000000..4d8b9ec --- /dev/null +++ b/llvm/test/CodeGen/DirectX/strip-module-md.ll @@ -0,0 +1,75 @@ +; RUN: opt -S -dxil-translate-metadata < %s | FileCheck %s + +; Ensures that only metadata explicitly specified on the allow list, or +; debug-related metadata, is emitted + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK-NOT: !dx.rootsignatures +; CHECK-NOT: !llvm.errno.tbaa + +; CHECK-DAG: !llvm.dbg.cu + +; CHECK-DAG: !llvm.module.flags = !{![[#DWARF_VER:]], ![[#DEBUG_VER:]]} +; CHECK-DAG: !llvm.ident = !{![[#IDENT:]]} + +; CHECK-DAG: !dx.shaderModel +; CHECK-DAG: !dx.version +; CHECK-DAG: !dx.entryPoints +; CHECK-DAG: !dx.valver +; CHECK-DAG: !dx.resources + +; CHECK-NOT: !dx.rootsignatures +; CHECK-NOT: !llvm.errno.tbaa + +; Check the allowed llvm metadata structure to ensure it is still DXIL compatible +; If this fails, please ensure that the updated form is DXIL compatible before +; updating the test.
+ +; CHECK-DAG: ![[#IDENT]] = !{!"clang 22.0.0"} +; CHECK-DAG: ![[#DWARF_VER]] = !{i32 2, !"Dwarf Version", i32 2} +; CHECK-DAG: ![[#DEBUG_VER]] = !{i32 2, !"Debug Info Version", i32 3} + +; CHECK-NOT: !dx.rootsignatures +; CHECK-NOT: !llvm.errno.tbaa + +@BufA.str = private unnamed_addr constant [5 x i8] c"BufA\00", align 1 + +define void @main () #0 { +entry: + %typed0 = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0) + @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_v4f32_1_0_0( + i32 3, i32 5, i32 1, i32 0, ptr @BufA.str) + ret void +} + +attributes #0 = { noinline nounwind "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +; Incompatible +!dx.rootsignatures = !{!2} +!llvm.errno.tbaa = !{!5} + +; Compatible +!llvm.dbg.cu = !{!8} +!llvm.module.flags = !{!11, !12} +!llvm.ident = !{!13} +!dx.valver = !{!14} + +!2 = !{ ptr @main, !3, i32 2 } +!3 = !{ !4 } +!4 = !{ !"RootFlags", i32 1 } + +!5 = !{!6, !6, i64 0} +!6 = !{!"omnipotent char", !7} +!7 = !{!"Simple C/C++ TBAA"} + +!8 = distinct !DICompileUnit(language: DW_LANG_C99, file: !9, producer: "Some Compiler", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !10, splitDebugInlining: false, nameTableKind: None) +!9 = !DIFile(filename: "hlsl.hlsl", directory: "/some-path") +!10 = !{} + +!11 = !{i32 2, !"Dwarf Version", i32 2} +!12 = !{i32 2, !"Debug Info Version", i32 3} + +!13 = !{!"clang 22.0.0"} + +!14 = !{i32 1, i32 1} diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll new file mode 100644 index 0000000..2a5a8fa --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll @@ -0,0 +1,307 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define void @xvavg_b(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrai.b $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <32 x i8>, ptr %a + %vb = load <32 x i8>, ptr %b + %add = add <32 x i8> %va, %vb + %shr = ashr <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <32 x i8> %shr, ptr %res + ret void +} + +define void @xvavg_h(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrai.h $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i16>, ptr %a + %vb = load <16 x i16>, ptr %b + %add = add <16 x i16> %va, %vb + %shr = ashr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <16 x i16> %shr, ptr %res + ret void +} + +define void @xvavg_w(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrai.w $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 
x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %add = add <8 x i32> %va, %vb + %shr = ashr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + store <8 x i32> %shr, ptr %res + ret void +} + +define void @xvavg_d(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrai.d $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %add = add <4 x i64> %va, %vb + %shr = ashr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1> + store <4 x i64> %shr, ptr %res + ret void +} + +define void @xvavg_bu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_bu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrli.b $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <32 x i8>, ptr %a + %vb = load <32 x i8>, ptr %b + %add = add <32 x i8> %va, %vb + %shr = lshr <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <32 x i8> %shr, ptr %res + ret void +} + +define void @xvavg_hu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_hu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrli.h $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i16>, ptr %a + %vb = load <16 x i16>, ptr %b + %add = add <16 x i16> %va, %vb + %shr = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <16 x i16> %shr, ptr %res + ret void +} + +define void @xvavg_wu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_wu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrli.w $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %add = add <8 x i32> %va, %vb + %shr = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + store <8 x i32> %shr, ptr %res + ret void +} + +define void @xvavg_du(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavg_du: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvsrli.d $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %add = add <4 x i64> %va, %vb + %shr = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1> + store <4 x i64> %shr, ptr %res + ret void +} + +define void @xvavgr_b(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.bu $xr0, $xr0, 1 +; CHECK-NEXT: xvsrai.b $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <32 x i8>, ptr %a + %vb = load <32 x i8>, ptr %b + %add = add <32 x i8> %va, %vb + %add1 = 
add <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + %shr = ashr <32 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <32 x i8> %shr, ptr %res + ret void +} + +define void @xvavgr_h(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.hu $xr0, $xr0, 1 +; CHECK-NEXT: xvsrai.h $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i16>, ptr %a + %vb = load <16 x i16>, ptr %b + %add = add <16 x i16> %va, %vb + %add1 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %shr = ashr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <16 x i16> %shr, ptr %res + ret void +} + +define void @xvavgr_w(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.wu $xr0, $xr0, 1 +; CHECK-NEXT: xvsrai.w $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %add = add <8 x i32> %va, %vb + %add1 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + %shr = ashr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + store <8 x i32> %shr, ptr %res + ret void +} + +define void @xvavgr_d(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.du $xr0, $xr0, 1 +; CHECK-NEXT: xvsrai.d $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %add = add <4 x i64> %va, %vb + %add1 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1> + %shr = ashr <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1> + store <4 x i64> %shr, ptr %res + ret void +} + +define void @xvavgr_bu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_bu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.bu $xr0, $xr0, 1 +; CHECK-NEXT: xvsrli.b $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <32 x i8>, ptr %a + %vb = load <32 x i8>, ptr %b + %add = add <32 x i8> %va, %vb + %add1 = add <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + %shr = lshr <32 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <32 x i8> %shr, ptr %res + ret void +} + +define void @xvavgr_hu(ptr 
%res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_hu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.hu $xr0, $xr0, 1 +; CHECK-NEXT: xvsrli.h $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i16>, ptr %a + %vb = load <16 x i16>, ptr %b + %add = add <16 x i16> %va, %vb + %add1 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %shr = lshr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <16 x i16> %shr, ptr %res + ret void +} + +define void @xvavgr_wu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_wu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.wu $xr0, $xr0, 1 +; CHECK-NEXT: xvsrli.w $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %add = add <8 x i32> %va, %vb + %add1 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + %shr = lshr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + store <8 x i32> %shr, ptr %res + ret void +} + +define void @xvavgr_du(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvavgr_du: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvaddi.du $xr0, $xr0, 1 +; CHECK-NEXT: xvsrli.d $xr0, $xr0, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %add = add <4 x i64> %va, %vb + %add1 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1> + %shr = lshr <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1> + store <4 x i64> %shr, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll new file mode 100644 index 0000000..20b88984 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll @@ -0,0 +1,307 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define void @vavg_b(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrai.b $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i8>, ptr %a + %vb = load <16 x i8>, ptr %b + %add = add <16 x i8> %va, %vb + %shr = ashr <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <16 x i8> %shr, ptr %res + ret void +} + +define void @vavg_h(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrai.h $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i16>, ptr %a + %vb = load <8 x i16>, ptr %b + %add = add <8 x i16> %va, %vb + %shr 
= ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <8 x i16> %shr, ptr %res + ret void +} + +define void @vavg_w(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrai.w $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i32>, ptr %a + %vb = load <4 x i32>, ptr %b + %add = add <4 x i32> %va, %vb + %shr = ashr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1> + store <4 x i32> %shr, ptr %res + ret void +} + +define void @vavg_d(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrai.d $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <2 x i64>, ptr %a + %vb = load <2 x i64>, ptr %b + %add = add <2 x i64> %va, %vb + %shr = ashr <2 x i64> %add, <i64 1, i64 1> + store <2 x i64> %shr, ptr %res + ret void +} + +define void @vavg_bu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_bu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrli.b $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i8>, ptr %a + %vb = load <16 x i8>, ptr %b + %add = add <16 x i8> %va, %vb + %shr = lshr <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <16 x i8> %shr, ptr %res + ret void +} + +define void @vavg_hu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_hu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrli.h $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i16>, ptr %a + %vb = load <8 x i16>, ptr %b + %add = add <8 x i16> %va, %vb + %shr = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <8 x i16> %shr, ptr %res + ret void +} + +define void @vavg_wu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_wu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrli.w $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i32>, ptr %a + %vb = load <4 x i32>, ptr %b + %add = add <4 x i32> %va, %vb + %shr = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1> + store <4 x i32> %shr, ptr %res + ret void +} + +define void @vavg_du(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavg_du: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vsrli.d $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <2 x i64>, ptr %a + %vb = load <2 x i64>, ptr %b + %add = add <2 x i64> %va, %vb + %shr = lshr <2 x i64> %add, <i64 1, i64 1> + store <2 x i64> %shr, ptr %res + ret void +} + +define void @vavgr_b(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.bu $vr0, $vr0, 1 +; CHECK-NEXT: vsrai.b $vr0, 
$vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i8>, ptr %a + %vb = load <16 x i8>, ptr %b + %add = add <16 x i8> %va, %vb + %add1 = add <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + %shr = ashr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <16 x i8> %shr, ptr %res + ret void +} + +define void @vavgr_h(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.hu $vr0, $vr0, 1 +; CHECK-NEXT: vsrai.h $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i16>, ptr %a + %vb = load <8 x i16>, ptr %b + %add = add <8 x i16> %va, %vb + %add1 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %shr = ashr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <8 x i16> %shr, ptr %res + ret void +} + +define void @vavgr_w(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.wu $vr0, $vr0, 1 +; CHECK-NEXT: vsrai.w $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i32>, ptr %a + %vb = load <4 x i32>, ptr %b + %add = add <4 x i32> %va, %vb + %add1 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1> + %shr = ashr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1> + store <4 x i32> %shr, ptr %res + ret void +} + +define void @vavgr_d(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.du $vr0, $vr0, 1 +; CHECK-NEXT: vsrai.d $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <2 x i64>, ptr %a + %vb = load <2 x i64>, ptr %b + %add = add <2 x i64> %va, %vb + %add1 = add <2 x i64> %add, <i64 1, i64 1> + %shr = ashr <2 x i64> %add1, <i64 1, i64 1> + store <2 x i64> %shr, ptr %res + ret void +} + +define void @vavgr_bu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_bu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.bu $vr0, $vr0, 1 +; CHECK-NEXT: vsrli.b $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i8>, ptr %a + %vb = load <16 x i8>, ptr %b + %add = add <16 x i8> %va, %vb + %add1 = add <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + %shr = lshr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1> + store <16 x i8> %shr, ptr %res + ret void +} + +define void @vavgr_hu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_hu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.hu $vr0, $vr0, 1 +; CHECK-NEXT: vsrli.h $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i16>, ptr %a + %vb = load <8 x i16>, ptr %b + %add = add <8 x i16> %va, %vb + %add1 = add <8 x i16> %add, <i16 
1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %shr = lshr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + store <8 x i16> %shr, ptr %res + ret void +} + +define void @vavgr_wu(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_wu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.wu $vr0, $vr0, 1 +; CHECK-NEXT: vsrli.w $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i32>, ptr %a + %vb = load <4 x i32>, ptr %b + %add = add <4 x i32> %va, %vb + %add1 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1> + %shr = lshr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1> + store <4 x i32> %shr, ptr %res + ret void +} + +define void @vavgr_du(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vavgr_du: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vaddi.du $vr0, $vr0, 1 +; CHECK-NEXT: vsrli.d $vr0, $vr0, 1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <2 x i64>, ptr %a + %vb = load <2 x i64>, ptr %b + %add = add <2 x i64> %va, %vb + %add1 = add <2 x i64> %add, <i64 1, i64 1> + %shr = lshr <2 x i64> %add1, <i64 1, i64 1> + store <2 x i64> %shr, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll new file mode 100644 index 0000000..f2ccf3e --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll @@ -0,0 +1,738 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mcpu=sm_20 | FileCheck %s +; RUN: %if ptxas %{ llc < %s -mcpu=sm_20 | %ptxas-verify %} +target triple = "nvptx64-nvidia-cuda" + +; Test dynamic insertelt at the beginning of a chain +define <4 x i32> @dynamic_at_beginning(i32 %idx) { +; CHECK-LABEL: dynamic_at_beginning( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot0[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b64 %rd<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot0; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_at_beginning_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 0; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%rd5], 10; +; CHECK-NEXT: ld.b32 %r1, [%SP+12]; +; CHECK-NEXT: ld.b32 %r2, [%SP]; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r2, 20, 30, %r1}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx + %v1 = insertelement <4 x i32> %v0, i32 20, i32 1 + %v2 = insertelement <4 x i32> %v1, i32 30, i32 2 + ret <4 x i32> %v2 +} + +; Test dynamic insertelt at the end of a chain +define <4 x i32> @dynamic_at_end(i32 %idx) { +; CHECK-LABEL: dynamic_at_end( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot1[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot1; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_at_end_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 0; +; 
CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%SP+4], 20; +; CHECK-NEXT: st.b32 [%SP], 10; +; CHECK-NEXT: st.b32 [%rd5], 30; +; CHECK-NEXT: ld.b32 %r1, [%SP+12]; +; CHECK-NEXT: ld.b32 %r2, [%SP+8]; +; CHECK-NEXT: ld.b32 %r3, [%SP+4]; +; CHECK-NEXT: ld.b32 %r4, [%SP]; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 0 + %v1 = insertelement <4 x i32> %v0, i32 20, i32 1 + %v2 = insertelement <4 x i32> %v1, i32 30, i32 %idx + ret <4 x i32> %v2 +} + +; Test dynamic insertelt in the middle of a chain +define <4 x i32> @dynamic_in_middle(i32 %idx) { +; CHECK-LABEL: dynamic_in_middle( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot2[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot2; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_in_middle_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 0; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%SP], 10; +; CHECK-NEXT: st.b32 [%rd5], 20; +; CHECK-NEXT: ld.b32 %r1, [%SP+12]; +; CHECK-NEXT: ld.b32 %r2, [%SP+4]; +; CHECK-NEXT: ld.b32 %r3, [%SP]; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r3, %r2, 30, %r1}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 0 + %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx + %v2 = insertelement <4 x i32> %v1, i32 30, i32 2 + ret <4 x i32> %v2 +} + +; Test repeated dynamic insertelt with the same index +define <4 x i32> @repeated_same_index(i32 %idx) { +; CHECK-LABEL: repeated_same_index( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot3[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot3; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [repeated_same_index_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 0; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%rd5], 20; +; CHECK-NEXT: ld.b32 %r1, [%SP+12]; +; CHECK-NEXT: ld.b32 %r2, [%SP+8]; +; CHECK-NEXT: ld.b32 %r3, [%SP+4]; +; CHECK-NEXT: ld.b32 %r4, [%SP]; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx + %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx + ret <4 x i32> %v1 +} + +; Test multiple dynamic insertelts +define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) { +; CHECK-LABEL: multiple_dynamic( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot4[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<10>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot4; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [multiple_dynamic_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 0; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%rd5], 10; +; CHECK-NEXT: ld.param.b32 %rd6, [multiple_dynamic_param_1]; +; CHECK-NEXT: and.b64 %rd7, 
%rd6, 3; +; CHECK-NEXT: shl.b64 %rd8, %rd7, 2; +; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8; +; CHECK-NEXT: st.b32 [%rd9], 20; +; CHECK-NEXT: ld.b32 %r1, [%SP+12]; +; CHECK-NEXT: ld.b32 %r2, [%SP+8]; +; CHECK-NEXT: ld.b32 %r3, [%SP+4]; +; CHECK-NEXT: ld.b32 %r4, [%SP]; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0 + %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1 + ret <4 x i32> %v1 +} + +; Test chain with all dynamic insertelts +define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) { +; CHECK-LABEL: all_dynamic( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot5[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<18>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot5; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [all_dynamic_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 0; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: ld.param.b32 %rd6, [all_dynamic_param_1]; +; CHECK-NEXT: and.b64 %rd7, %rd6, 3; +; CHECK-NEXT: shl.b64 %rd8, %rd7, 2; +; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8; +; CHECK-NEXT: ld.param.b32 %rd10, [all_dynamic_param_2]; +; CHECK-NEXT: and.b64 %rd11, %rd10, 3; +; CHECK-NEXT: shl.b64 %rd12, %rd11, 2; +; CHECK-NEXT: add.s64 %rd13, %rd4, %rd12; +; CHECK-NEXT: st.b32 [%rd5], 10; +; CHECK-NEXT: st.b32 [%rd9], 20; +; CHECK-NEXT: st.b32 [%rd13], 30; +; CHECK-NEXT: ld.param.b32 %rd14, [all_dynamic_param_3]; +; CHECK-NEXT: and.b64 %rd15, %rd14, 3; +; CHECK-NEXT: shl.b64 %rd16, %rd15, 2; +; CHECK-NEXT: add.s64 %rd17, %rd4, %rd16; +; CHECK-NEXT: st.b32 [%rd17], 40; +; CHECK-NEXT: ld.b32 %r1, [%SP+12]; +; CHECK-NEXT: ld.b32 %r2, [%SP+8]; +; CHECK-NEXT: ld.b32 %r3, [%SP+4]; +; CHECK-NEXT: ld.b32 %r4, [%SP]; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0 + %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1 + %v2 = insertelement <4 x i32> %v1, i32 30, i32 %idx2 + %v3 = insertelement <4 x i32> %v2, i32 40, i32 %idx3 + ret <4 x i32> %v3 +} + +; Test mixed constant and dynamic insertelts with a high ratio of dynamic ones. +; Should lower all insertelts to stores.
+define <4 x i32> @mix_dynamic_constant(i32 %idx0, i32 %idx1) { +; CHECK-LABEL: mix_dynamic_constant( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot6[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<10>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot6; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [mix_dynamic_constant_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 0; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%rd5], 10; +; CHECK-NEXT: ld.param.b32 %rd6, [mix_dynamic_constant_param_1]; +; CHECK-NEXT: and.b64 %rd7, %rd6, 3; +; CHECK-NEXT: shl.b64 %rd8, %rd7, 2; +; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8; +; CHECK-NEXT: st.b32 [%SP+4], 20; +; CHECK-NEXT: st.b32 [%rd9], 30; +; CHECK-NEXT: ld.b32 %r1, [%SP+12]; +; CHECK-NEXT: ld.b32 %r2, [%SP+8]; +; CHECK-NEXT: ld.b32 %r3, [%SP+4]; +; CHECK-NEXT: ld.b32 %r4, [%SP]; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0 + %v1 = insertelement <4 x i32> %v0, i32 20, i32 1 + %v2 = insertelement <4 x i32> %v1, i32 30, i32 %idx1 + ret <4 x i32> %v2 +} + +; Test two separate chains that don't interfere +define void @two_separate_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) { +; CHECK-LABEL: two_separate_chains( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot7[32]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<13>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot7; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [two_separate_chains_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 16; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%rd5], 10; +; CHECK-NEXT: ld.param.b32 %rd6, [two_separate_chains_param_1]; +; CHECK-NEXT: and.b64 %rd7, %rd6, 3; +; CHECK-NEXT: shl.b64 %rd8, %rd7, 2; +; CHECK-NEXT: add.u64 %rd9, %SP, 0; +; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8; +; CHECK-NEXT: ld.b32 %r1, [%SP+28]; +; CHECK-NEXT: ld.b32 %r2, [%SP+24]; +; CHECK-NEXT: ld.b32 %r3, [%SP+16]; +; CHECK-NEXT: ld.param.b64 %rd11, [two_separate_chains_param_2]; +; CHECK-NEXT: st.b32 [%rd10], 30; +; CHECK-NEXT: ld.param.b64 %rd12, [two_separate_chains_param_3]; +; CHECK-NEXT: ld.b32 %r4, [%SP+12]; +; CHECK-NEXT: ld.b32 %r5, [%SP+4]; +; CHECK-NEXT: ld.b32 %r6, [%SP]; +; CHECK-NEXT: st.v4.b32 [%rd11], {%r3, 20, %r2, %r1}; +; CHECK-NEXT: st.v4.b32 [%rd12], {%r6, %r5, 40, %r4}; +; CHECK-NEXT: ret; + ; Chain 1 + %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0 + %v1 = insertelement <4 x i32> %v0, i32 20, i32 1 + + ; Chain 2 + %w0 = insertelement <4 x i32> poison, i32 30, i32 %idx1 + %w1 = insertelement <4 x i32> %w0, i32 40, i32 2 + + store <4 x i32> %v1, ptr %out0 + store <4 x i32> %w1, ptr %out1 + ret void +} + +; Test overlapping chains (chain 2 starts from middle of chain 1) +define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) { +; CHECK-LABEL: overlapping_chains( +; CHECK: { +; CHECK-NEXT: .local .align 4 .b8 __local_depot8[32]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<14>; +; CHECK-EMPTY: +; 
CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot8; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [overlapping_chains_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 3; +; CHECK-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NEXT: add.u64 %rd4, %SP, 16; +; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3; +; CHECK-NEXT: st.b32 [%rd5], 10; +; CHECK-NEXT: add.u64 %rd6, %SP, 0; +; CHECK-NEXT: add.s64 %rd7, %rd6, %rd3; +; CHECK-NEXT: ld.b32 %r1, [%SP+28]; +; CHECK-NEXT: ld.b32 %r2, [%SP+16]; +; CHECK-NEXT: ld.param.b64 %rd8, [overlapping_chains_param_2]; +; CHECK-NEXT: st.b32 [%rd7], 10; +; CHECK-NEXT: ld.param.b32 %rd9, [overlapping_chains_param_1]; +; CHECK-NEXT: and.b64 %rd10, %rd9, 3; +; CHECK-NEXT: shl.b64 %rd11, %rd10, 2; +; CHECK-NEXT: add.s64 %rd12, %rd6, %rd11; +; CHECK-NEXT: st.b32 [%SP+4], 20; +; CHECK-NEXT: st.b32 [%rd12], 30; +; CHECK-NEXT: ld.param.b64 %rd13, [overlapping_chains_param_3]; +; CHECK-NEXT: ld.b32 %r3, [%SP+12]; +; CHECK-NEXT: ld.b32 %r4, [%SP+8]; +; CHECK-NEXT: ld.b32 %r5, [%SP+4]; +; CHECK-NEXT: ld.b32 %r6, [%SP]; +; CHECK-NEXT: st.v4.b32 [%rd8], {%r2, 20, 40, %r1}; +; CHECK-NEXT: st.v4.b32 [%rd13], {%r6, %r5, %r4, %r3}; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0 + %v1 = insertelement <4 x i32> %v0, i32 20, i32 1 + + ; Chain 2 starts from v1 + %w0 = insertelement <4 x i32> %v1, i32 30, i32 %idx1 + + ; Continue chain 1 + %v2 = insertelement <4 x i32> %v1, i32 40, i32 2 + + store <4 x i32> %v2, ptr %out0 + store <4 x i32> %w0, ptr %out1 + ret void +} + +; Test with i1 elements (1-bit, non-byte-aligned) +define <8 x i1> @dynamic_i1(i32 %idx) { +; CHECK-LABEL: dynamic_i1( +; CHECK: { +; CHECK-NEXT: .local .align 8 .b8 __local_depot9[8]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot9; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i1_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 7; +; CHECK-NEXT: add.u64 %rd3, %SP, 0; +; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; +; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NEXT: st.b8 [%rd4], 1; +; CHECK-NEXT: ld.b32 %r3, [%SP]; +; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 0x7773U; +; CHECK-NEXT: ld.b32 %r5, [%SP+4]; +; CHECK-NEXT: prmt.b32 %r6, %r5, 0, 0x7771U; +; CHECK-NEXT: prmt.b32 %r7, %r5, 0, 0x7772U; +; CHECK-NEXT: prmt.b32 %r8, %r5, 0, 0x7773U; +; CHECK-NEXT: st.param.b8 [func_retval0+4], %r5; +; CHECK-NEXT: st.param.b8 [func_retval0], %r3; +; CHECK-NEXT: st.param.b8 [func_retval0+7], %r8; +; CHECK-NEXT: st.param.b8 [func_retval0+6], %r7; +; CHECK-NEXT: st.param.b8 [func_retval0+5], %r6; +; CHECK-NEXT: st.param.b8 [func_retval0+3], %r4; +; CHECK-NEXT: st.param.b8 [func_retval0+2], 1; +; CHECK-NEXT: st.param.b8 [func_retval0+1], 0; +; CHECK-NEXT: ret; + %v0 = insertelement <8 x i1> poison, i1 1, i32 %idx + %v1 = insertelement <8 x i1> %v0, i1 0, i32 1 + %v2 = insertelement <8 x i1> %v1, i1 1, i32 2 + ret <8 x i1> %v2 +} + +; Test with i2 elements (2-bit, non-byte-aligned) +define <8 x i2> @dynamic_i2(i32 %idx) { +; CHECK-LABEL: dynamic_i2( +; CHECK: { +; CHECK-NEXT: .local .align 8 .b8 __local_depot10[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b16 %rs<24>; +; CHECK-NEXT: .reg .b32 %r<10>; +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot10; +; CHECK-NEXT: cvta.local.u64 %SP, 
%SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i2_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 7; +; CHECK-NEXT: add.u64 %rd3, %SP, 0; +; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; +; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NEXT: st.b8 [%rd4], 1; +; CHECK-NEXT: ld.b32 %r3, [%SP+4]; +; CHECK-NEXT: cvt.u16.u32 %rs1, %r3; +; CHECK-NEXT: and.b16 %rs2, %rs1, 3; +; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 0x7771U; +; CHECK-NEXT: cvt.u16.u32 %rs3, %r4; +; CHECK-NEXT: and.b16 %rs4, %rs3, 3; +; CHECK-NEXT: shl.b16 %rs5, %rs4, 2; +; CHECK-NEXT: or.b16 %rs6, %rs2, %rs5; +; CHECK-NEXT: prmt.b32 %r5, %r3, 0, 0x7772U; +; CHECK-NEXT: cvt.u16.u32 %rs7, %r5; +; CHECK-NEXT: and.b16 %rs8, %rs7, 3; +; CHECK-NEXT: shl.b16 %rs9, %rs8, 4; +; CHECK-NEXT: or.b16 %rs10, %rs6, %rs9; +; CHECK-NEXT: prmt.b32 %r6, %r3, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs11, %r6; +; CHECK-NEXT: shl.b16 %rs12, %rs11, 6; +; CHECK-NEXT: or.b16 %rs13, %rs10, %rs12; +; CHECK-NEXT: st.b8 [%SP+8], %rs13; +; CHECK-NEXT: ld.b32 %r7, [%SP]; +; CHECK-NEXT: prmt.b32 %r8, %r7, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs14, %r8; +; CHECK-NEXT: shl.b16 %rs15, %rs14, 6; +; CHECK-NEXT: and.b16 %rs16, %rs15, 192; +; CHECK-NEXT: ld.s8 %rs17, [%SP+8]; +; CHECK-NEXT: shl.b16 %rs18, %rs17, 8; +; CHECK-NEXT: or.b16 %rs19, %rs16, %rs18; +; CHECK-NEXT: prmt.b32 %r9, %r7, 0, 0x7770U; +; CHECK-NEXT: st.param.b16 [func_retval0], %r9; +; CHECK-NEXT: st.param.b16 [func_retval0+8], %rs17; +; CHECK-NEXT: shr.s16 %rs20, %rs18, 14; +; CHECK-NEXT: st.param.b16 [func_retval0+14], %rs20; +; CHECK-NEXT: shr.s16 %rs21, %rs18, 12; +; CHECK-NEXT: st.param.b16 [func_retval0+12], %rs21; +; CHECK-NEXT: shr.s16 %rs22, %rs18, 10; +; CHECK-NEXT: st.param.b16 [func_retval0+10], %rs22; +; CHECK-NEXT: shr.s16 %rs23, %rs19, 6; +; CHECK-NEXT: st.param.b16 [func_retval0+6], %rs23; +; CHECK-NEXT: st.param.b16 [func_retval0+4], 3; +; CHECK-NEXT: st.param.b16 [func_retval0+2], 2; +; CHECK-NEXT: ret; + %v0 = insertelement <8 x i2> poison, i2 1, i32 %idx + %v1 = insertelement <8 x i2> %v0, i2 2, i32 1 + %v2 = insertelement <8 x i2> %v1, i2 3, i32 2 + ret <8 x i2> %v2 +} + +; Test with i3 elements (3-bit, non-byte-aligned) +define <8 x i3> @dynamic_i3(i32 %idx) { +; CHECK-LABEL: dynamic_i3( +; CHECK: { +; CHECK-NEXT: .local .align 8 .b8 __local_depot11[8]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b16 %rs<5>; +; CHECK-NEXT: .reg .b32 %r<15>; +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot11; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i3_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 7; +; CHECK-NEXT: add.u64 %rd3, %SP, 0; +; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; +; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NEXT: st.b8 [%rd4], 1; +; CHECK-NEXT: ld.b32 %r3, [%SP]; +; CHECK-NEXT: ld.b32 %r4, [%SP+4]; +; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U; +; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U; +; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U; +; CHECK-NEXT: st.param.b32 [func_retval0+12], %r7; +; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U; +; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U; +; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U; +; CHECK-NEXT: st.param.b32 [func_retval0+8], %r10; +; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs1, %r11; +; CHECK-NEXT: mov.b16 %rs2, 3; +; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1}; +; CHECK-NEXT: st.param.b32 [func_retval0+4], %r12; +; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U; +; 
CHECK-NEXT: cvt.u16.u32 %rs3, %r13; +; CHECK-NEXT: mov.b16 %rs4, 2; +; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4}; +; CHECK-NEXT: st.param.b32 [func_retval0], %r14; +; CHECK-NEXT: ret; + %v0 = insertelement <8 x i3> poison, i3 1, i32 %idx + %v1 = insertelement <8 x i3> %v0, i3 2, i32 1 + %v2 = insertelement <8 x i3> %v1, i3 3, i32 2 + ret <8 x i3> %v2 +} + +; Test with i4 elements (4-bit, non-byte-aligned) +define <8 x i4> @dynamic_i4(i32 %idx) { +; CHECK-LABEL: dynamic_i4( +; CHECK: { +; CHECK-NEXT: .local .align 8 .b8 __local_depot12[16]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b16 %rs<30>; +; CHECK-NEXT: .reg .b32 %r<22>; +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot12; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i4_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 7; +; CHECK-NEXT: add.u64 %rd3, %SP, 0; +; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; +; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NEXT: st.b8 [%rd4], 1; +; CHECK-NEXT: ld.b32 %r3, [%SP]; +; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 0x7770U; +; CHECK-NEXT: cvt.u16.u32 %rs1, %r4; +; CHECK-NEXT: and.b16 %rs2, %rs1, 15; +; CHECK-NEXT: prmt.b32 %r5, %r3, 0, 0x7771U; +; CHECK-NEXT: cvt.u16.u32 %rs3, %r5; +; CHECK-NEXT: and.b16 %rs4, %rs3, 15; +; CHECK-NEXT: shl.b16 %rs5, %rs4, 4; +; CHECK-NEXT: or.b16 %rs6, %rs2, %rs5; +; CHECK-NEXT: prmt.b32 %r6, %r3, 0, 0x7772U; +; CHECK-NEXT: cvt.u16.u32 %rs7, %r6; +; CHECK-NEXT: and.b16 %rs8, %rs7, 15; +; CHECK-NEXT: shl.b16 %rs9, %rs8, 8; +; CHECK-NEXT: or.b16 %rs10, %rs6, %rs9; +; CHECK-NEXT: prmt.b32 %r7, %r3, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs11, %r7; +; CHECK-NEXT: shl.b16 %rs12, %rs11, 12; +; CHECK-NEXT: or.b16 %rs13, %rs10, %rs12; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs13; +; CHECK-NEXT: ld.b32 %r9, [%SP+4]; +; CHECK-NEXT: prmt.b32 %r10, %r9, 0, 0x7770U; +; CHECK-NEXT: cvt.u16.u32 %rs14, %r10; +; CHECK-NEXT: and.b16 %rs15, %rs14, 15; +; CHECK-NEXT: prmt.b32 %r11, %r9, 0, 0x7771U; +; CHECK-NEXT: cvt.u16.u32 %rs16, %r11; +; CHECK-NEXT: and.b16 %rs17, %rs16, 15; +; CHECK-NEXT: shl.b16 %rs18, %rs17, 4; +; CHECK-NEXT: or.b16 %rs19, %rs15, %rs18; +; CHECK-NEXT: prmt.b32 %r12, %r9, 0, 0x7772U; +; CHECK-NEXT: cvt.u16.u32 %rs20, %r12; +; CHECK-NEXT: and.b16 %rs21, %rs20, 15; +; CHECK-NEXT: shl.b16 %rs22, %rs21, 8; +; CHECK-NEXT: or.b16 %rs23, %rs19, %rs22; +; CHECK-NEXT: prmt.b32 %r13, %r9, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs24, %r13; +; CHECK-NEXT: shl.b16 %rs25, %rs24, 12; +; CHECK-NEXT: or.b16 %rs26, %rs23, %rs25; +; CHECK-NEXT: cvt.u32.u16 %r14, %rs26; +; CHECK-NEXT: shl.b32 %r15, %r14, 16; +; CHECK-NEXT: or.b32 %r16, %r8, %r15; +; CHECK-NEXT: mov.b32 %r17, {%rs20, %rs24}; +; CHECK-NEXT: st.param.b32 [func_retval0+12], %r17; +; CHECK-NEXT: mov.b32 %r18, {%rs14, %rs16}; +; CHECK-NEXT: st.param.b32 [func_retval0+8], %r18; +; CHECK-NEXT: mov.b16 %rs27, 2; +; CHECK-NEXT: mov.b32 %r19, {%rs1, %rs27}; +; CHECK-NEXT: st.param.b32 [func_retval0], %r19; +; CHECK-NEXT: shr.u32 %r20, %r16, 12; +; CHECK-NEXT: cvt.u16.u32 %rs28, %r20; +; CHECK-NEXT: mov.b16 %rs29, 3; +; CHECK-NEXT: mov.b32 %r21, {%rs29, %rs28}; +; CHECK-NEXT: st.param.b32 [func_retval0+4], %r21; +; CHECK-NEXT: ret; + %v0 = insertelement <8 x i4> poison, i4 1, i32 %idx + %v1 = insertelement <8 x i4> %v0, i4 2, i32 1 + %v2 = insertelement <8 x i4> %v1, i4 3, i32 2 + ret <8 x i4> %v2 +} + +; Test with i5 elements (5-bit, non-byte-aligned) +define <8 x i5> @dynamic_i5(i32 %idx) { +; CHECK-LABEL: 
dynamic_i5( +; CHECK: { +; CHECK-NEXT: .local .align 8 .b8 __local_depot13[8]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b16 %rs<5>; +; CHECK-NEXT: .reg .b32 %r<15>; +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot13; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i5_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 7; +; CHECK-NEXT: add.u64 %rd3, %SP, 0; +; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; +; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NEXT: st.b8 [%rd4], 1; +; CHECK-NEXT: ld.b32 %r3, [%SP]; +; CHECK-NEXT: ld.b32 %r4, [%SP+4]; +; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U; +; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U; +; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U; +; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U; +; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U; +; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U; +; CHECK-NEXT: st.param.v2.b32 [func_retval0+8], {%r10, %r7}; +; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs1, %r11; +; CHECK-NEXT: mov.b16 %rs2, 3; +; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1}; +; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U; +; CHECK-NEXT: cvt.u16.u32 %rs3, %r13; +; CHECK-NEXT: mov.b16 %rs4, 2; +; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r12}; +; CHECK-NEXT: ret; + %v0 = insertelement <8 x i5> poison, i5 1, i32 %idx + %v1 = insertelement <8 x i5> %v0, i5 2, i32 1 + %v2 = insertelement <8 x i5> %v1, i5 3, i32 2 + ret <8 x i5> %v2 +} + +; Test with i7 elements (7-bit, non-byte-aligned) +define <8 x i7> @dynamic_i7(i32 %idx) { +; CHECK-LABEL: dynamic_i7( +; CHECK: { +; CHECK-NEXT: .local .align 8 .b8 __local_depot14[8]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg .b64 %SPL; +; CHECK-NEXT: .reg .b16 %rs<5>; +; CHECK-NEXT: .reg .b32 %r<15>; +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot14; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i7_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 7; +; CHECK-NEXT: add.u64 %rd3, %SP, 0; +; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; +; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NEXT: st.b8 [%rd4], 1; +; CHECK-NEXT: ld.b32 %r3, [%SP]; +; CHECK-NEXT: ld.b32 %r4, [%SP+4]; +; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U; +; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U; +; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U; +; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U; +; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U; +; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U; +; CHECK-NEXT: st.param.v2.b32 [func_retval0+8], {%r10, %r7}; +; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs1, %r11; +; CHECK-NEXT: mov.b16 %rs2, 3; +; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1}; +; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U; +; CHECK-NEXT: cvt.u16.u32 %rs3, %r13; +; CHECK-NEXT: mov.b16 %rs4, 2; +; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r12}; +; CHECK-NEXT: ret; + %v0 = insertelement <8 x i7> poison, i7 1, i32 %idx + %v1 = insertelement <8 x i7> %v0, i7 2, i32 1 + %v2 = insertelement <8 x i7> %v1, i7 3, i32 2 + ret <8 x i7> %v2 +} + +; Test with i6 elements (6-bit, non-byte-aligned) +define <8 x i6> @dynamic_i6(i32 %idx) { +; CHECK-LABEL: dynamic_i6( +; CHECK: { +; CHECK-NEXT: .local .align 8 .b8 __local_depot15[8]; +; CHECK-NEXT: .reg .b64 %SP; +; CHECK-NEXT: .reg 
.b64 %SPL; +; CHECK-NEXT: .reg .b16 %rs<5>; +; CHECK-NEXT: .reg .b32 %r<15>; +; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: mov.b64 %SPL, __local_depot15; +; CHECK-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i6_param_0]; +; CHECK-NEXT: and.b64 %rd2, %rd1, 7; +; CHECK-NEXT: add.u64 %rd3, %SP, 0; +; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; +; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NEXT: st.b8 [%rd4], 1; +; CHECK-NEXT: ld.b32 %r3, [%SP]; +; CHECK-NEXT: ld.b32 %r4, [%SP+4]; +; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U; +; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U; +; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U; +; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U; +; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U; +; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U; +; CHECK-NEXT: st.param.v2.b32 [func_retval0+8], {%r10, %r7}; +; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U; +; CHECK-NEXT: cvt.u16.u32 %rs1, %r11; +; CHECK-NEXT: mov.b16 %rs2, 3; +; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1}; +; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U; +; CHECK-NEXT: cvt.u16.u32 %rs3, %r13; +; CHECK-NEXT: mov.b16 %rs4, 2; +; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r12}; +; CHECK-NEXT: ret; + %v0 = insertelement <8 x i6> poison, i6 1, i32 %idx + %v1 = insertelement <8 x i6> %v0, i6 2, i32 1 + %v2 = insertelement <8 x i6> %v1, i6 3, i32 2 + ret <8 x i6> %v2 +} + +; Test with multiple dynamic insertions on i3 elements +define <4 x i3> @multiple_dynamic_i3(i32 %idx0, i32 %idx1) { +; CHECK-LABEL: multiple_dynamic_i3( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [multiple_dynamic_i3_param_0]; +; CHECK-NEXT: shl.b32 %r2, %r1, 3; +; CHECK-NEXT: bfi.b32 %r3, 1, %r4, %r2, 8; +; CHECK-NEXT: ld.param.b32 %r5, [multiple_dynamic_i3_param_1]; +; CHECK-NEXT: shl.b32 %r6, %r5, 3; +; CHECK-NEXT: bfi.b32 %r7, 2, %r3, %r6, 8; +; CHECK-NEXT: st.param.b16 [func_retval0], %r7; +; CHECK-NEXT: shr.u32 %r8, %r7, 16; +; CHECK-NEXT: st.param.b16 [func_retval0+2], %r8; +; CHECK-NEXT: ret; + %v0 = insertelement <4 x i3> poison, i3 1, i32 %idx0 + %v1 = insertelement <4 x i3> %v0, i3 2, i32 %idx1 + ret <4 x i3> %v1 +} diff --git a/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll b/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll index 00a77f9..530169f 100644 --- a/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll +++ b/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll @@ -212,37 +212,33 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind { ; CHECK-NEXT: std r30, 48(r1) # 8-byte Folded Spill ; CHECK-NEXT: andi. 
r3, r3, 1 ; CHECK-NEXT: li r3, -1 +; CHECK-NEXT: li r4, 0 ; CHECK-NEXT: li r30, 0 ; CHECK-NEXT: crmove 4*cr2+lt, gt ; CHECK-NEXT: std r29, 40(r1) # 8-byte Folded Spill ; CHECK-NEXT: b .LBB3_2 -; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB3_1: # %if.end116 ; CHECK-NEXT: # ; CHECK-NEXT: bl callee ; CHECK-NEXT: nop ; CHECK-NEXT: mr r3, r29 -; CHECK-NEXT: .LBB3_2: # %cond.end.i.i -; CHECK-NEXT: # =>This Loop Header: Depth=1 -; CHECK-NEXT: # Child Loop BB3_3 Depth 2 -; CHECK-NEXT: lwz r29, 0(r3) -; CHECK-NEXT: li r5, 0 -; CHECK-NEXT: extsw r4, r29 -; CHECK-NEXT: .p2align 5 -; CHECK-NEXT: .LBB3_3: # %while.body5.i -; CHECK-NEXT: # Parent Loop BB3_2 Depth=1 -; CHECK-NEXT: # => This Inner Loop Header: Depth=2 -; CHECK-NEXT: addi r5, r5, -1 -; CHECK-NEXT: cmpwi r5, 0 -; CHECK-NEXT: bgt cr0, .LBB3_3 -; CHECK-NEXT: # %bb.4: # %while.cond12.preheader.i +; CHECK-NEXT: li r4, 0 +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: .LBB3_2: # %while.body5.i ; CHECK-NEXT: # +; CHECK-NEXT: addi r4, r4, -1 +; CHECK-NEXT: cmpwi r4, 0 +; CHECK-NEXT: bgt cr0, .LBB3_2 +; CHECK-NEXT: # %bb.3: # %while.cond12.preheader.i +; CHECK-NEXT: # +; CHECK-NEXT: lwz r29, 0(r3) ; CHECK-NEXT: bc 12, 4*cr2+lt, .LBB3_1 -; CHECK-NEXT: # %bb.5: # %for.cond99.preheader +; CHECK-NEXT: # %bb.4: # %for.cond99.preheader ; CHECK-NEXT: # +; CHECK-NEXT: extsw r4, r29 ; CHECK-NEXT: ld r5, 0(r3) -; CHECK-NEXT: sldi r4, r4, 2 ; CHECK-NEXT: stw r3, 0(r3) +; CHECK-NEXT: sldi r4, r4, 2 ; CHECK-NEXT: stwx r30, r5, r4 ; CHECK-NEXT: b .LBB3_1 ; @@ -256,37 +252,33 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind { ; CHECK-BE-NEXT: std r30, 64(r1) # 8-byte Folded Spill ; CHECK-BE-NEXT: andi. r3, r3, 1 ; CHECK-BE-NEXT: li r3, -1 +; CHECK-BE-NEXT: li r4, 0 ; CHECK-BE-NEXT: li r30, 0 ; CHECK-BE-NEXT: crmove 4*cr2+lt, gt ; CHECK-BE-NEXT: std r29, 56(r1) # 8-byte Folded Spill ; CHECK-BE-NEXT: b .LBB3_2 -; CHECK-BE-NEXT: .p2align 4 ; CHECK-BE-NEXT: .LBB3_1: # %if.end116 ; CHECK-BE-NEXT: # ; CHECK-BE-NEXT: bl callee ; CHECK-BE-NEXT: nop ; CHECK-BE-NEXT: mr r3, r29 -; CHECK-BE-NEXT: .LBB3_2: # %cond.end.i.i -; CHECK-BE-NEXT: # =>This Loop Header: Depth=1 -; CHECK-BE-NEXT: # Child Loop BB3_3 Depth 2 -; CHECK-BE-NEXT: lwz r29, 0(r3) -; CHECK-BE-NEXT: li r5, 0 -; CHECK-BE-NEXT: extsw r4, r29 -; CHECK-BE-NEXT: .p2align 5 -; CHECK-BE-NEXT: .LBB3_3: # %while.body5.i -; CHECK-BE-NEXT: # Parent Loop BB3_2 Depth=1 -; CHECK-BE-NEXT: # => This Inner Loop Header: Depth=2 -; CHECK-BE-NEXT: addi r5, r5, -1 -; CHECK-BE-NEXT: cmpwi r5, 0 -; CHECK-BE-NEXT: bgt cr0, .LBB3_3 -; CHECK-BE-NEXT: # %bb.4: # %while.cond12.preheader.i +; CHECK-BE-NEXT: li r4, 0 +; CHECK-BE-NEXT: .p2align 4 +; CHECK-BE-NEXT: .LBB3_2: # %while.body5.i +; CHECK-BE-NEXT: # +; CHECK-BE-NEXT: addi r4, r4, -1 +; CHECK-BE-NEXT: cmpwi r4, 0 +; CHECK-BE-NEXT: bgt cr0, .LBB3_2 +; CHECK-BE-NEXT: # %bb.3: # %while.cond12.preheader.i ; CHECK-BE-NEXT: # +; CHECK-BE-NEXT: lwz r29, 0(r3) ; CHECK-BE-NEXT: bc 12, 4*cr2+lt, .LBB3_1 -; CHECK-BE-NEXT: # %bb.5: # %for.cond99.preheader +; CHECK-BE-NEXT: # %bb.4: # %for.cond99.preheader ; CHECK-BE-NEXT: # +; CHECK-BE-NEXT: extsw r4, r29 ; CHECK-BE-NEXT: ld r5, 0(r3) -; CHECK-BE-NEXT: sldi r4, r4, 2 ; CHECK-BE-NEXT: stw r3, 0(r3) +; CHECK-BE-NEXT: sldi r4, r4, 2 ; CHECK-BE-NEXT: stwx r30, r5, r4 ; CHECK-BE-NEXT: b .LBB3_1 ; @@ -300,32 +292,28 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind { ; CHECK-P9-NEXT: std r0, 80(r1) ; CHECK-P9-NEXT: std r30, 48(r1) # 8-byte Folded Spill ; CHECK-P9-NEXT: li r3, -1 +; CHECK-P9-NEXT: 
li r4, 0 ; CHECK-P9-NEXT: li r30, 0 ; CHECK-P9-NEXT: std r29, 40(r1) # 8-byte Folded Spill ; CHECK-P9-NEXT: crmove 4*cr2+lt, gt ; CHECK-P9-NEXT: b .LBB3_2 -; CHECK-P9-NEXT: .p2align 4 ; CHECK-P9-NEXT: .LBB3_1: # %if.end116 ; CHECK-P9-NEXT: # ; CHECK-P9-NEXT: bl callee ; CHECK-P9-NEXT: nop ; CHECK-P9-NEXT: mr r3, r29 -; CHECK-P9-NEXT: .LBB3_2: # %cond.end.i.i -; CHECK-P9-NEXT: # =>This Loop Header: Depth=1 -; CHECK-P9-NEXT: # Child Loop BB3_3 Depth 2 -; CHECK-P9-NEXT: lwz r29, 0(r3) ; CHECK-P9-NEXT: li r4, 0 -; CHECK-P9-NEXT: .p2align 5 -; CHECK-P9-NEXT: .LBB3_3: # %while.body5.i -; CHECK-P9-NEXT: # Parent Loop BB3_2 Depth=1 -; CHECK-P9-NEXT: # => This Inner Loop Header: Depth=2 +; CHECK-P9-NEXT: .p2align 4 +; CHECK-P9-NEXT: .LBB3_2: # %while.body5.i +; CHECK-P9-NEXT: # ; CHECK-P9-NEXT: addi r4, r4, -1 ; CHECK-P9-NEXT: cmpwi r4, 0 -; CHECK-P9-NEXT: bgt cr0, .LBB3_3 -; CHECK-P9-NEXT: # %bb.4: # %while.cond12.preheader.i +; CHECK-P9-NEXT: bgt cr0, .LBB3_2 +; CHECK-P9-NEXT: # %bb.3: # %while.cond12.preheader.i ; CHECK-P9-NEXT: # +; CHECK-P9-NEXT: lwz r29, 0(r3) ; CHECK-P9-NEXT: bc 12, 4*cr2+lt, .LBB3_1 -; CHECK-P9-NEXT: # %bb.5: # %for.cond99.preheader +; CHECK-P9-NEXT: # %bb.4: # %for.cond99.preheader ; CHECK-P9-NEXT: # ; CHECK-P9-NEXT: ld r4, 0(r3) ; CHECK-P9-NEXT: extswsli r5, r29, 2 @@ -343,32 +331,28 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind { ; CHECK-P9-BE-NEXT: std r0, 96(r1) ; CHECK-P9-BE-NEXT: std r30, 64(r1) # 8-byte Folded Spill ; CHECK-P9-BE-NEXT: li r3, -1 +; CHECK-P9-BE-NEXT: li r4, 0 ; CHECK-P9-BE-NEXT: li r30, 0 ; CHECK-P9-BE-NEXT: std r29, 56(r1) # 8-byte Folded Spill ; CHECK-P9-BE-NEXT: crmove 4*cr2+lt, gt ; CHECK-P9-BE-NEXT: b .LBB3_2 -; CHECK-P9-BE-NEXT: .p2align 4 ; CHECK-P9-BE-NEXT: .LBB3_1: # %if.end116 ; CHECK-P9-BE-NEXT: # ; CHECK-P9-BE-NEXT: bl callee ; CHECK-P9-BE-NEXT: nop ; CHECK-P9-BE-NEXT: mr r3, r29 -; CHECK-P9-BE-NEXT: .LBB3_2: # %cond.end.i.i -; CHECK-P9-BE-NEXT: # =>This Loop Header: Depth=1 -; CHECK-P9-BE-NEXT: # Child Loop BB3_3 Depth 2 -; CHECK-P9-BE-NEXT: lwz r29, 0(r3) ; CHECK-P9-BE-NEXT: li r4, 0 -; CHECK-P9-BE-NEXT: .p2align 5 -; CHECK-P9-BE-NEXT: .LBB3_3: # %while.body5.i -; CHECK-P9-BE-NEXT: # Parent Loop BB3_2 Depth=1 -; CHECK-P9-BE-NEXT: # => This Inner Loop Header: Depth=2 +; CHECK-P9-BE-NEXT: .p2align 4 +; CHECK-P9-BE-NEXT: .LBB3_2: # %while.body5.i +; CHECK-P9-BE-NEXT: # ; CHECK-P9-BE-NEXT: addi r4, r4, -1 ; CHECK-P9-BE-NEXT: cmpwi r4, 0 -; CHECK-P9-BE-NEXT: bgt cr0, .LBB3_3 -; CHECK-P9-BE-NEXT: # %bb.4: # %while.cond12.preheader.i +; CHECK-P9-BE-NEXT: bgt cr0, .LBB3_2 +; CHECK-P9-BE-NEXT: # %bb.3: # %while.cond12.preheader.i ; CHECK-P9-BE-NEXT: # +; CHECK-P9-BE-NEXT: lwz r29, 0(r3) ; CHECK-P9-BE-NEXT: bc 12, 4*cr2+lt, .LBB3_1 -; CHECK-P9-BE-NEXT: # %bb.5: # %for.cond99.preheader +; CHECK-P9-BE-NEXT: # %bb.4: # %for.cond99.preheader ; CHECK-P9-BE-NEXT: # ; CHECK-P9-BE-NEXT: ld r4, 0(r3) ; CHECK-P9-BE-NEXT: extswsli r5, r29, 2 diff --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll index 291a9c1..b006c78 100644 --- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll +++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll @@ -242,17 +242,14 @@ define <2 x i64> @testDoubleword(<2 x i64> %a, i64 %b, i64 %idx) { ; AIX-P8-32-LABEL: testDoubleword: ; AIX-P8-32: # %bb.0: # %entry ; AIX-P8-32-NEXT: add r6, r6, r6 -; AIX-P8-32-NEXT: addi r5, r1, -32 +; AIX-P8-32-NEXT: addi r5, r1, -16 ; AIX-P8-32-NEXT: rlwinm r7, r6, 2, 28, 29 -; AIX-P8-32-NEXT: stxvw4x v2, 0, r5 +; AIX-P8-32-NEXT: 
stxvd2x v2, 0, r5 ; AIX-P8-32-NEXT: stwx r3, r5, r7 -; AIX-P8-32-NEXT: addi r3, r1, -16 -; AIX-P8-32-NEXT: lxvw4x vs0, 0, r5 -; AIX-P8-32-NEXT: addi r5, r6, 1 -; AIX-P8-32-NEXT: rlwinm r5, r5, 2, 28, 29 -; AIX-P8-32-NEXT: stxvw4x vs0, 0, r3 -; AIX-P8-32-NEXT: stwx r4, r3, r5 -; AIX-P8-32-NEXT: lxvw4x v2, 0, r3 +; AIX-P8-32-NEXT: addi r3, r6, 1 +; AIX-P8-32-NEXT: rlwinm r3, r3, 2, 28, 29 +; AIX-P8-32-NEXT: stwx r4, r5, r3 +; AIX-P8-32-NEXT: lxvd2x v2, 0, r5 ; AIX-P8-32-NEXT: blr entry: %vecins = insertelement <2 x i64> %a, i64 %b, i64 %idx @@ -426,17 +423,14 @@ define <4 x float> @testFloat2(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 ze ; AIX-P8-LABEL: testFloat2: ; AIX-P8: # %bb.0: # %entry ; AIX-P8-NEXT: lwz r6, 0(r3) -; AIX-P8-NEXT: rlwinm r4, r4, 2, 28, 29 -; AIX-P8-NEXT: addi r7, r1, -32 +; AIX-P8-NEXT: lwz r3, 1(r3) +; AIX-P8-NEXT: addi r7, r1, -16 ; AIX-P8-NEXT: stxvw4x v2, 0, r7 ; AIX-P8-NEXT: rlwinm r5, r5, 2, 28, 29 +; AIX-P8-NEXT: rlwinm r4, r4, 2, 28, 29 ; AIX-P8-NEXT: stwx r6, r7, r4 -; AIX-P8-NEXT: addi r4, r1, -16 -; AIX-P8-NEXT: lxvw4x vs0, 0, r7 -; AIX-P8-NEXT: lwz r3, 1(r3) -; AIX-P8-NEXT: stxvw4x vs0, 0, r4 -; AIX-P8-NEXT: stwx r3, r4, r5 -; AIX-P8-NEXT: lxvw4x v2, 0, r4 +; AIX-P8-NEXT: stwx r3, r7, r5 +; AIX-P8-NEXT: lxvw4x v2, 0, r7 ; AIX-P8-NEXT: blr entry: %add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1 @@ -493,38 +487,32 @@ define <4 x float> @testFloat3(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 ze ; ; AIX-P8-64-LABEL: testFloat3: ; AIX-P8-64: # %bb.0: # %entry +; AIX-P8-64-NEXT: li r7, 1 ; AIX-P8-64-NEXT: lis r6, 1 -; AIX-P8-64-NEXT: rlwinm r4, r4, 2, 28, 29 -; AIX-P8-64-NEXT: addi r7, r1, -32 ; AIX-P8-64-NEXT: rlwinm r5, r5, 2, 28, 29 +; AIX-P8-64-NEXT: rlwinm r4, r4, 2, 28, 29 +; AIX-P8-64-NEXT: rldic r7, r7, 36, 27 ; AIX-P8-64-NEXT: lwzx r6, r3, r6 +; AIX-P8-64-NEXT: lwzx r3, r3, r7 +; AIX-P8-64-NEXT: addi r7, r1, -16 ; AIX-P8-64-NEXT: stxvw4x v2, 0, r7 ; AIX-P8-64-NEXT: stwx r6, r7, r4 -; AIX-P8-64-NEXT: li r4, 1 -; AIX-P8-64-NEXT: lxvw4x vs0, 0, r7 -; AIX-P8-64-NEXT: rldic r4, r4, 36, 27 -; AIX-P8-64-NEXT: lwzx r3, r3, r4 -; AIX-P8-64-NEXT: addi r4, r1, -16 -; AIX-P8-64-NEXT: stxvw4x vs0, 0, r4 -; AIX-P8-64-NEXT: stwx r3, r4, r5 -; AIX-P8-64-NEXT: lxvw4x v2, 0, r4 +; AIX-P8-64-NEXT: stwx r3, r7, r5 +; AIX-P8-64-NEXT: lxvw4x v2, 0, r7 ; AIX-P8-64-NEXT: blr ; ; AIX-P8-32-LABEL: testFloat3: ; AIX-P8-32: # %bb.0: # %entry ; AIX-P8-32-NEXT: lis r6, 1 -; AIX-P8-32-NEXT: rlwinm r4, r4, 2, 28, 29 -; AIX-P8-32-NEXT: addi r7, r1, -32 ; AIX-P8-32-NEXT: rlwinm r5, r5, 2, 28, 29 +; AIX-P8-32-NEXT: rlwinm r4, r4, 2, 28, 29 +; AIX-P8-32-NEXT: addi r7, r1, -16 ; AIX-P8-32-NEXT: lwzx r6, r3, r6 +; AIX-P8-32-NEXT: lwz r3, 0(r3) ; AIX-P8-32-NEXT: stxvw4x v2, 0, r7 ; AIX-P8-32-NEXT: stwx r6, r7, r4 -; AIX-P8-32-NEXT: addi r4, r1, -16 -; AIX-P8-32-NEXT: lxvw4x vs0, 0, r7 -; AIX-P8-32-NEXT: lwz r3, 0(r3) -; AIX-P8-32-NEXT: stxvw4x vs0, 0, r4 -; AIX-P8-32-NEXT: stwx r3, r4, r5 -; AIX-P8-32-NEXT: lxvw4x v2, 0, r4 +; AIX-P8-32-NEXT: stwx r3, r7, r5 +; AIX-P8-32-NEXT: lxvw4x v2, 0, r7 ; AIX-P8-32-NEXT: blr entry: %add.ptr = getelementptr inbounds i8, ptr %b, i64 65536 diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll index d07f608..c3183a1 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll @@ -290,9 +290,9 @@ define void @liveConstant() { ; CHECK-NEXT: .half 2 ; CHECK-NEXT: .half 0 ; CHECK-NEXT: .word -define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 
%arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) { +define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i8 %l25, i16 zeroext %l26, i32 signext %l27) { entry: - call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) + call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i8 %l25, i16 %l26, i32 %l27) ret void } diff --git a/llvm/test/CodeGen/RISCV/rv64p.ll b/llvm/test/CodeGen/RISCV/rv64p.ll index cb07f94..f937f44 100644 --- a/llvm/test/CodeGen/RISCV/rv64p.ll +++ b/llvm/test/CodeGen/RISCV/rv64p.ll @@ -297,8 +297,7 @@ declare i32 @llvm.abs.i32(i32, i1 immarg) define i32 @abs_i32(i32 %x) { ; CHECK-LABEL: abs_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: sext.w a0, a0 -; CHECK-NEXT: abs a0, a0 +; CHECK-NEXT: absw a0, a0 ; CHECK-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ret i32 %abs @@ -307,8 +306,7 @@ define i32 @abs_i32(i32 %x) { define signext i32 @abs_i32_sext(i32 signext %x) { ; CHECK-LABEL: abs_i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: abs a0, a0 -; CHECK-NEXT: sext.w a0, a0 +; CHECK-NEXT: absw a0, a0 ; CHECK-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ret i32 %abs diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll index 9c36bae..ec257bc 100644 --- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll @@ -6,77 +6,81 @@ define void @arm_min_q31(ptr nocapture readonly %pSrc, i32 %blockSize, ptr nocap ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: ldr.w r12, [r0] ; CHECK-NEXT: subs.w r9, r1, #1 ; CHECK-NEXT: beq .LBB0_3 ; CHECK-NEXT: @ %bb.1: @ %while.body.preheader -; CHECK-NEXT: and r8, r9, #3 +; CHECK-NEXT: and r6, r9, #3 ; CHECK-NEXT: subs r7, r1, #2 ; CHECK-NEXT: cmp r7, #3 ; CHECK-NEXT: bhs .LBB0_4 ; CHECK-NEXT: @ %bb.2: -; CHECK-NEXT: movs r6, #0 -; CHECK-NEXT: b .LBB0_6 +; CHECK-NEXT: mov.w r10, #0 +; CHECK-NEXT: cbnz r6, .LBB0_7 +; CHECK-NEXT: b .LBB0_10 ; CHECK-NEXT: .LBB0_3: -; CHECK-NEXT: movs r6, #0 +; CHECK-NEXT: mov.w r10, #0 ; CHECK-NEXT: b .LBB0_10 ; CHECK-NEXT: .LBB0_4: @ %while.body.preheader.new ; CHECK-NEXT: bic r7, r9, #3 -; CHECK-NEXT: movs r6, #1 +; CHECK-NEXT: 
str r6, [sp] @ 4-byte Spill ; CHECK-NEXT: subs r7, #4 +; CHECK-NEXT: movs r6, #1 +; CHECK-NEXT: mov.w r8, #0 +; CHECK-NEXT: mov.w r10, #0 ; CHECK-NEXT: add.w lr, r6, r7, lsr #2 -; CHECK-NEXT: movs r6, #0 -; CHECK-NEXT: movs r7, #4 ; CHECK-NEXT: .LBB0_5: @ %while.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: ldr r10, [r0, #16]! -; CHECK-NEXT: sub.w r9, r9, #4 -; CHECK-NEXT: ldrd r5, r4, [r0, #-12] -; CHECK-NEXT: ldr r11, [r0, #-4] +; CHECK-NEXT: ldr r11, [r0, #16]! +; CHECK-NEXT: ldrd r5, r7, [r0, #-12] +; CHECK-NEXT: ldr r4, [r0, #-4] ; CHECK-NEXT: cmp r12, r5 -; CHECK-NEXT: it gt -; CHECK-NEXT: subgt r6, r7, #3 ; CHECK-NEXT: csel r5, r5, r12, gt -; CHECK-NEXT: cmp r5, r4 +; CHECK-NEXT: csinc r6, r10, r8, le +; CHECK-NEXT: cmp r5, r7 ; CHECK-NEXT: it gt -; CHECK-NEXT: subgt r6, r7, #2 -; CHECK-NEXT: csel r5, r4, r5, gt -; CHECK-NEXT: cmp r5, r11 +; CHECK-NEXT: addgt.w r6, r8, #2 +; CHECK-NEXT: csel r7, r7, r5, gt +; CHECK-NEXT: cmp r7, r4 ; CHECK-NEXT: it gt -; CHECK-NEXT: subgt r6, r7, #1 -; CHECK-NEXT: csel r5, r11, r5, gt -; CHECK-NEXT: cmp r5, r10 -; CHECK-NEXT: csel r6, r7, r6, gt -; CHECK-NEXT: add.w r7, r7, #4 -; CHECK-NEXT: csel r12, r10, r5, gt +; CHECK-NEXT: addgt.w r6, r8, #3 +; CHECK-NEXT: csel r7, r4, r7, gt +; CHECK-NEXT: add.w r8, r8, #4 +; CHECK-NEXT: cmp r7, r11 +; CHECK-NEXT: csel r10, r8, r6, gt +; CHECK-NEXT: csel r12, r11, r7, gt ; CHECK-NEXT: le lr, .LBB0_5 -; CHECK-NEXT: .LBB0_6: @ %while.end.loopexit.unr-lcssa -; CHECK-NEXT: cmp.w r8, #0 -; CHECK-NEXT: beq .LBB0_10 -; CHECK-NEXT: @ %bb.7: @ %while.body.epil +; CHECK-NEXT: @ %bb.6: @ %while.end.loopexit.unr-lcssa.loopexit +; CHECK-NEXT: ldr r6, [sp] @ 4-byte Reload +; CHECK-NEXT: sub.w r9, r9, r8 +; CHECK-NEXT: cbz r6, .LBB0_10 +; CHECK-NEXT: .LBB0_7: @ %while.body.epil ; CHECK-NEXT: ldr r7, [r0, #4] ; CHECK-NEXT: sub.w r1, r1, r9 ; CHECK-NEXT: cmp r12, r7 -; CHECK-NEXT: csel r6, r1, r6, gt +; CHECK-NEXT: csel r10, r1, r10, gt ; CHECK-NEXT: csel r12, r7, r12, gt -; CHECK-NEXT: cmp.w r8, #1 +; CHECK-NEXT: cmp r6, #1 ; CHECK-NEXT: beq .LBB0_10 ; CHECK-NEXT: @ %bb.8: @ %while.body.epil.1 ; CHECK-NEXT: ldr r7, [r0, #8] ; CHECK-NEXT: cmp r12, r7 -; CHECK-NEXT: csinc r6, r6, r1, le +; CHECK-NEXT: csinc r10, r10, r1, le ; CHECK-NEXT: csel r12, r7, r12, gt -; CHECK-NEXT: cmp.w r8, #2 +; CHECK-NEXT: cmp r6, #2 ; CHECK-NEXT: beq .LBB0_10 ; CHECK-NEXT: @ %bb.9: @ %while.body.epil.2 ; CHECK-NEXT: ldr r0, [r0, #12] ; CHECK-NEXT: cmp r12, r0 ; CHECK-NEXT: it gt -; CHECK-NEXT: addgt r6, r1, #2 +; CHECK-NEXT: addgt.w r10, r1, #2 ; CHECK-NEXT: csel r12, r0, r12, gt ; CHECK-NEXT: .LBB0_10: @ %while.end ; CHECK-NEXT: str.w r12, [r2] -; CHECK-NEXT: str r6, [r3] +; CHECK-NEXT: str.w r10, [r3] +; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} entry: %0 = load i32, ptr %pSrc, align 4 diff --git a/llvm/test/CodeGen/X86/and-mask-variable.ll b/llvm/test/CodeGen/X86/and-mask-variable.ll index d89f0db..3e5bd69 100644 --- a/llvm/test/CodeGen/X86/and-mask-variable.ll +++ b/llvm/test/CodeGen/X86/and-mask-variable.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86-NOBMI -; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86-BMI2 -; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86-BMI2 -; RUN: 
llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64-NOBMI -; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64-BMI2 -; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64-BMI2 +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86-NOBMI +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X86-BMI2 +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X86-BMI2 +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X64-NOBMI +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X64-BMI2 +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X64-BMI2 define i32 @mask_pair(i32 %x, i32 %y) nounwind { ; X86-NOBMI-LABEL: mask_pair: diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll index 3e7b73a..1173c45 100644 --- a/llvm/test/CodeGen/X86/atomic-load-store.ll +++ b/llvm/test/CodeGen/X86/atomic-load-store.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O3 -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-SSE-O3 -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O3 -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O3 -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O0 -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-SSE-O0 -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O0 -; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O0 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O3,CHECK-SSE-O3 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O3,CHECK-SSE-O3 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O3,CHECK-AVX-O3,CHECK-AVX2-O3 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-O3,CHECK-AVX-O3,CHECK-AVX512-O3 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O0,CHECK-SSE-O0 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O0,CHECK-SSE-O0 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O0,CHECK-AVX-O0,CHECK-AVX2-O0 +; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v4 | FileCheck %s 
--check-prefixes=CHECK,CHECK-O0,CHECK-AVX-O0,CHECK-AVX512-O0 define void @test1(ptr %ptr, i32 %val1) { ; CHECK-LABEL: test1: @@ -50,30 +50,10 @@ define <1 x i8> @atomic_vec1_i8(ptr %x) { ; CHECK-O3-NEXT: movzbl (%rdi), %eax ; CHECK-O3-NEXT: retq ; -; CHECK-SSE-O3-LABEL: atomic_vec1_i8: -; CHECK-SSE-O3: # %bb.0: -; CHECK-SSE-O3-NEXT: movzbl (%rdi), %eax -; CHECK-SSE-O3-NEXT: retq -; -; CHECK-AVX-O3-LABEL: atomic_vec1_i8: -; CHECK-AVX-O3: # %bb.0: -; CHECK-AVX-O3-NEXT: movzbl (%rdi), %eax -; CHECK-AVX-O3-NEXT: retq -; ; CHECK-O0-LABEL: atomic_vec1_i8: ; CHECK-O0: # %bb.0: ; CHECK-O0-NEXT: movb (%rdi), %al ; CHECK-O0-NEXT: retq -; -; CHECK-SSE-O0-LABEL: atomic_vec1_i8: -; CHECK-SSE-O0: # %bb.0: -; CHECK-SSE-O0-NEXT: movb (%rdi), %al -; CHECK-SSE-O0-NEXT: retq -; -; CHECK-AVX-O0-LABEL: atomic_vec1_i8: -; CHECK-AVX-O0: # %bb.0: -; CHECK-AVX-O0-NEXT: movb (%rdi), %al -; CHECK-AVX-O0-NEXT: retq %ret = load atomic <1 x i8>, ptr %x acquire, align 1 ret <1 x i8> %ret } @@ -84,30 +64,10 @@ define <1 x i16> @atomic_vec1_i16(ptr %x) { ; CHECK-O3-NEXT: movzwl (%rdi), %eax ; CHECK-O3-NEXT: retq ; -; CHECK-SSE-O3-LABEL: atomic_vec1_i16: -; CHECK-SSE-O3: # %bb.0: -; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax -; CHECK-SSE-O3-NEXT: retq -; -; CHECK-AVX-O3-LABEL: atomic_vec1_i16: -; CHECK-AVX-O3: # %bb.0: -; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax -; CHECK-AVX-O3-NEXT: retq -; ; CHECK-O0-LABEL: atomic_vec1_i16: ; CHECK-O0: # %bb.0: ; CHECK-O0-NEXT: movw (%rdi), %ax ; CHECK-O0-NEXT: retq -; -; CHECK-SSE-O0-LABEL: atomic_vec1_i16: -; CHECK-SSE-O0: # %bb.0: -; CHECK-SSE-O0-NEXT: movw (%rdi), %ax -; CHECK-SSE-O0-NEXT: retq -; -; CHECK-AVX-O0-LABEL: atomic_vec1_i16: -; CHECK-AVX-O0: # %bb.0: -; CHECK-AVX-O0-NEXT: movw (%rdi), %ax -; CHECK-AVX-O0-NEXT: retq %ret = load atomic <1 x i16>, ptr %x acquire, align 2 ret <1 x i16> %ret } @@ -119,35 +79,11 @@ define <1 x i32> @atomic_vec1_i8_zext(ptr %x) { ; CHECK-O3-NEXT: movzbl %al, %eax ; CHECK-O3-NEXT: retq ; -; CHECK-SSE-O3-LABEL: atomic_vec1_i8_zext: -; CHECK-SSE-O3: # %bb.0: -; CHECK-SSE-O3-NEXT: movzbl (%rdi), %eax -; CHECK-SSE-O3-NEXT: movzbl %al, %eax -; CHECK-SSE-O3-NEXT: retq -; -; CHECK-AVX-O3-LABEL: atomic_vec1_i8_zext: -; CHECK-AVX-O3: # %bb.0: -; CHECK-AVX-O3-NEXT: movzbl (%rdi), %eax -; CHECK-AVX-O3-NEXT: movzbl %al, %eax -; CHECK-AVX-O3-NEXT: retq -; ; CHECK-O0-LABEL: atomic_vec1_i8_zext: ; CHECK-O0: # %bb.0: ; CHECK-O0-NEXT: movb (%rdi), %al ; CHECK-O0-NEXT: movzbl %al, %eax ; CHECK-O0-NEXT: retq -; -; CHECK-SSE-O0-LABEL: atomic_vec1_i8_zext: -; CHECK-SSE-O0: # %bb.0: -; CHECK-SSE-O0-NEXT: movb (%rdi), %al -; CHECK-SSE-O0-NEXT: movzbl %al, %eax -; CHECK-SSE-O0-NEXT: retq -; -; CHECK-AVX-O0-LABEL: atomic_vec1_i8_zext: -; CHECK-AVX-O0: # %bb.0: -; CHECK-AVX-O0-NEXT: movb (%rdi), %al -; CHECK-AVX-O0-NEXT: movzbl %al, %eax -; CHECK-AVX-O0-NEXT: retq %ret = load atomic <1 x i8>, ptr %x acquire, align 1 %zret = zext <1 x i8> %ret to <1 x i32> ret <1 x i32> %zret @@ -160,35 +96,11 @@ define <1 x i64> @atomic_vec1_i16_sext(ptr %x) { ; CHECK-O3-NEXT: movswq %ax, %rax ; CHECK-O3-NEXT: retq ; -; CHECK-SSE-O3-LABEL: atomic_vec1_i16_sext: -; CHECK-SSE-O3: # %bb.0: -; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax -; CHECK-SSE-O3-NEXT: movswq %ax, %rax -; CHECK-SSE-O3-NEXT: retq -; -; CHECK-AVX-O3-LABEL: atomic_vec1_i16_sext: -; CHECK-AVX-O3: # %bb.0: -; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax -; CHECK-AVX-O3-NEXT: movswq %ax, %rax -; CHECK-AVX-O3-NEXT: retq -; ; CHECK-O0-LABEL: atomic_vec1_i16_sext: ; CHECK-O0: # %bb.0: ; CHECK-O0-NEXT: movw (%rdi), %ax ; CHECK-O0-NEXT: movswq 
%ax, %rax ; CHECK-O0-NEXT: retq -; -; CHECK-SSE-O0-LABEL: atomic_vec1_i16_sext: -; CHECK-SSE-O0: # %bb.0: -; CHECK-SSE-O0-NEXT: movw (%rdi), %ax -; CHECK-SSE-O0-NEXT: movswq %ax, %rax -; CHECK-SSE-O0-NEXT: retq -; -; CHECK-AVX-O0-LABEL: atomic_vec1_i16_sext: -; CHECK-AVX-O0: # %bb.0: -; CHECK-AVX-O0-NEXT: movw (%rdi), %ax -; CHECK-AVX-O0-NEXT: movswq %ax, %rax -; CHECK-AVX-O0-NEXT: retq %ret = load atomic <1 x i16>, ptr %x acquire, align 2 %sret = sext <1 x i16> %ret to <1 x i64> ret <1 x i64> %sret @@ -204,12 +116,6 @@ define <1 x ptr addrspace(270)> @atomic_vec1_ptr270(ptr %x) { } define <1 x bfloat> @atomic_vec1_bfloat(ptr %x) { -; CHECK-O3-LABEL: atomic_vec1_bfloat: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: movzwl (%rdi), %eax -; CHECK-O3-NEXT: pinsrw $0, %eax, %xmm0 -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec1_bfloat: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax @@ -222,15 +128,6 @@ define <1 x bfloat> @atomic_vec1_bfloat(ptr %x) { ; CHECK-AVX-O3-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec1_bfloat: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: movw (%rdi), %cx -; CHECK-O0-NEXT: # implicit-def: $eax -; CHECK-O0-NEXT: movw %cx, %ax -; CHECK-O0-NEXT: # implicit-def: $xmm0 -; CHECK-O0-NEXT: pinsrw $0, %eax, %xmm0 -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec1_bfloat: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: movw (%rdi), %cx @@ -283,30 +180,6 @@ define <1 x ptr> @atomic_vec1_ptr(ptr %x) nounwind { ; CHECK-O3-NEXT: popq %rcx ; CHECK-O3-NEXT: retq ; -; CHECK-SSE-O3-LABEL: atomic_vec1_ptr: -; CHECK-SSE-O3: # %bb.0: -; CHECK-SSE-O3-NEXT: pushq %rax -; CHECK-SSE-O3-NEXT: movq %rdi, %rsi -; CHECK-SSE-O3-NEXT: movq %rsp, %rdx -; CHECK-SSE-O3-NEXT: movl $8, %edi -; CHECK-SSE-O3-NEXT: movl $2, %ecx -; CHECK-SSE-O3-NEXT: callq __atomic_load@PLT -; CHECK-SSE-O3-NEXT: movq (%rsp), %rax -; CHECK-SSE-O3-NEXT: popq %rcx -; CHECK-SSE-O3-NEXT: retq -; -; CHECK-AVX-O3-LABEL: atomic_vec1_ptr: -; CHECK-AVX-O3: # %bb.0: -; CHECK-AVX-O3-NEXT: pushq %rax -; CHECK-AVX-O3-NEXT: movq %rdi, %rsi -; CHECK-AVX-O3-NEXT: movq %rsp, %rdx -; CHECK-AVX-O3-NEXT: movl $8, %edi -; CHECK-AVX-O3-NEXT: movl $2, %ecx -; CHECK-AVX-O3-NEXT: callq __atomic_load@PLT -; CHECK-AVX-O3-NEXT: movq (%rsp), %rax -; CHECK-AVX-O3-NEXT: popq %rcx -; CHECK-AVX-O3-NEXT: retq -; ; CHECK-O0-LABEL: atomic_vec1_ptr: ; CHECK-O0: # %bb.0: ; CHECK-O0-NEXT: pushq %rax @@ -318,41 +191,11 @@ define <1 x ptr> @atomic_vec1_ptr(ptr %x) nounwind { ; CHECK-O0-NEXT: movq (%rsp), %rax ; CHECK-O0-NEXT: popq %rcx ; CHECK-O0-NEXT: retq -; -; CHECK-SSE-O0-LABEL: atomic_vec1_ptr: -; CHECK-SSE-O0: # %bb.0: -; CHECK-SSE-O0-NEXT: pushq %rax -; CHECK-SSE-O0-NEXT: movq %rdi, %rsi -; CHECK-SSE-O0-NEXT: movl $8, %edi -; CHECK-SSE-O0-NEXT: movq %rsp, %rdx -; CHECK-SSE-O0-NEXT: movl $2, %ecx -; CHECK-SSE-O0-NEXT: callq __atomic_load@PLT -; CHECK-SSE-O0-NEXT: movq (%rsp), %rax -; CHECK-SSE-O0-NEXT: popq %rcx -; CHECK-SSE-O0-NEXT: retq -; -; CHECK-AVX-O0-LABEL: atomic_vec1_ptr: -; CHECK-AVX-O0: # %bb.0: -; CHECK-AVX-O0-NEXT: pushq %rax -; CHECK-AVX-O0-NEXT: movq %rdi, %rsi -; CHECK-AVX-O0-NEXT: movl $8, %edi -; CHECK-AVX-O0-NEXT: movq %rsp, %rdx -; CHECK-AVX-O0-NEXT: movl $2, %ecx -; CHECK-AVX-O0-NEXT: callq __atomic_load@PLT -; CHECK-AVX-O0-NEXT: movq (%rsp), %rax -; CHECK-AVX-O0-NEXT: popq %rcx -; CHECK-AVX-O0-NEXT: retq %ret = load atomic <1 x ptr>, ptr %x acquire, align 4 ret <1 x ptr> %ret } define <1 x half> @atomic_vec1_half(ptr %x) { -; CHECK-O3-LABEL: atomic_vec1_half: -; 
CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: movzwl (%rdi), %eax -; CHECK-O3-NEXT: pinsrw $0, %eax, %xmm0 -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec1_half: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax @@ -365,15 +208,6 @@ define <1 x half> @atomic_vec1_half(ptr %x) { ; CHECK-AVX-O3-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec1_half: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: movw (%rdi), %cx -; CHECK-O0-NEXT: # implicit-def: $eax -; CHECK-O0-NEXT: movw %cx, %ax -; CHECK-O0-NEXT: # implicit-def: $xmm0 -; CHECK-O0-NEXT: pinsrw $0, %eax, %xmm0 -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec1_half: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: movw (%rdi), %cx @@ -396,11 +230,6 @@ define <1 x half> @atomic_vec1_half(ptr %x) { } define <1 x float> @atomic_vec1_float(ptr %x) { -; CHECK-O3-LABEL: atomic_vec1_float: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec1_float: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -411,11 +240,6 @@ define <1 x float> @atomic_vec1_float(ptr %x) { ; CHECK-AVX-O3-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec1_float: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec1_float: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -430,11 +254,6 @@ define <1 x float> @atomic_vec1_float(ptr %x) { } define <1 x double> @atomic_vec1_double_align(ptr %x) nounwind { -; CHECK-O3-LABEL: atomic_vec1_double_align: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec1_double_align: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero @@ -445,11 +264,6 @@ define <1 x double> @atomic_vec1_double_align(ptr %x) nounwind { ; CHECK-AVX-O3-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec1_double_align: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec1_double_align: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero @@ -476,30 +290,6 @@ define <1 x i64> @atomic_vec1_i64(ptr %x) nounwind { ; CHECK-O3-NEXT: popq %rcx ; CHECK-O3-NEXT: retq ; -; CHECK-SSE-O3-LABEL: atomic_vec1_i64: -; CHECK-SSE-O3: # %bb.0: -; CHECK-SSE-O3-NEXT: pushq %rax -; CHECK-SSE-O3-NEXT: movq %rdi, %rsi -; CHECK-SSE-O3-NEXT: movq %rsp, %rdx -; CHECK-SSE-O3-NEXT: movl $8, %edi -; CHECK-SSE-O3-NEXT: movl $2, %ecx -; CHECK-SSE-O3-NEXT: callq __atomic_load@PLT -; CHECK-SSE-O3-NEXT: movq (%rsp), %rax -; CHECK-SSE-O3-NEXT: popq %rcx -; CHECK-SSE-O3-NEXT: retq -; -; CHECK-AVX-O3-LABEL: atomic_vec1_i64: -; CHECK-AVX-O3: # %bb.0: -; CHECK-AVX-O3-NEXT: pushq %rax -; CHECK-AVX-O3-NEXT: movq %rdi, %rsi -; CHECK-AVX-O3-NEXT: movq %rsp, %rdx -; CHECK-AVX-O3-NEXT: movl $8, %edi -; CHECK-AVX-O3-NEXT: movl $2, %ecx -; CHECK-AVX-O3-NEXT: callq __atomic_load@PLT -; CHECK-AVX-O3-NEXT: movq (%rsp), %rax -; CHECK-AVX-O3-NEXT: popq %rcx -; CHECK-AVX-O3-NEXT: retq -; ; CHECK-O0-LABEL: atomic_vec1_i64: ; CHECK-O0: # %bb.0: ; CHECK-O0-NEXT: pushq %rax @@ -511,47 +301,11 @@ define <1 x i64> @atomic_vec1_i64(ptr %x) nounwind { ; CHECK-O0-NEXT: movq (%rsp), %rax ; 
CHECK-O0-NEXT: popq %rcx ; CHECK-O0-NEXT: retq -; -; CHECK-SSE-O0-LABEL: atomic_vec1_i64: -; CHECK-SSE-O0: # %bb.0: -; CHECK-SSE-O0-NEXT: pushq %rax -; CHECK-SSE-O0-NEXT: movq %rdi, %rsi -; CHECK-SSE-O0-NEXT: movl $8, %edi -; CHECK-SSE-O0-NEXT: movq %rsp, %rdx -; CHECK-SSE-O0-NEXT: movl $2, %ecx -; CHECK-SSE-O0-NEXT: callq __atomic_load@PLT -; CHECK-SSE-O0-NEXT: movq (%rsp), %rax -; CHECK-SSE-O0-NEXT: popq %rcx -; CHECK-SSE-O0-NEXT: retq -; -; CHECK-AVX-O0-LABEL: atomic_vec1_i64: -; CHECK-AVX-O0: # %bb.0: -; CHECK-AVX-O0-NEXT: pushq %rax -; CHECK-AVX-O0-NEXT: movq %rdi, %rsi -; CHECK-AVX-O0-NEXT: movl $8, %edi -; CHECK-AVX-O0-NEXT: movq %rsp, %rdx -; CHECK-AVX-O0-NEXT: movl $2, %ecx -; CHECK-AVX-O0-NEXT: callq __atomic_load@PLT -; CHECK-AVX-O0-NEXT: movq (%rsp), %rax -; CHECK-AVX-O0-NEXT: popq %rcx -; CHECK-AVX-O0-NEXT: retq %ret = load atomic <1 x i64>, ptr %x acquire, align 4 ret <1 x i64> %ret } define <1 x double> @atomic_vec1_double(ptr %x) nounwind { -; CHECK-O3-LABEL: atomic_vec1_double: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: pushq %rax -; CHECK-O3-NEXT: movq %rdi, %rsi -; CHECK-O3-NEXT: movq %rsp, %rdx -; CHECK-O3-NEXT: movl $8, %edi -; CHECK-O3-NEXT: movl $2, %ecx -; CHECK-O3-NEXT: callq __atomic_load@PLT -; CHECK-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-O3-NEXT: popq %rax -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec1_double: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: pushq %rax @@ -576,18 +330,6 @@ define <1 x double> @atomic_vec1_double(ptr %x) nounwind { ; CHECK-AVX-O3-NEXT: popq %rax ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec1_double: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: pushq %rax -; CHECK-O0-NEXT: movq %rdi, %rsi -; CHECK-O0-NEXT: movl $8, %edi -; CHECK-O0-NEXT: movq %rsp, %rdx -; CHECK-O0-NEXT: movl $2, %ecx -; CHECK-O0-NEXT: callq __atomic_load@PLT -; CHECK-O0-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-O0-NEXT: popq %rax -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec1_double: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: pushq %rax @@ -616,18 +358,6 @@ define <1 x double> @atomic_vec1_double(ptr %x) nounwind { } define <2 x i32> @atomic_vec2_i32(ptr %x) nounwind { -; CHECK-O3-LABEL: atomic_vec2_i32: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: pushq %rax -; CHECK-O3-NEXT: movq %rdi, %rsi -; CHECK-O3-NEXT: movq %rsp, %rdx -; CHECK-O3-NEXT: movl $8, %edi -; CHECK-O3-NEXT: movl $2, %ecx -; CHECK-O3-NEXT: callq __atomic_load@PLT -; CHECK-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-O3-NEXT: popq %rax -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec2_i32: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: pushq %rax @@ -652,18 +382,6 @@ define <2 x i32> @atomic_vec2_i32(ptr %x) nounwind { ; CHECK-AVX-O3-NEXT: popq %rax ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec2_i32: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: pushq %rax -; CHECK-O0-NEXT: movq %rdi, %rsi -; CHECK-O0-NEXT: movl $8, %edi -; CHECK-O0-NEXT: movq %rsp, %rdx -; CHECK-O0-NEXT: movl $2, %ecx -; CHECK-O0-NEXT: callq __atomic_load@PLT -; CHECK-O0-NEXT: movq {{.*#+}} xmm0 = mem[0],zero -; CHECK-O0-NEXT: popq %rax -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec2_i32: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: pushq %rax @@ -692,18 +410,6 @@ define <2 x i32> @atomic_vec2_i32(ptr %x) nounwind { } define <4 x float> @atomic_vec4_float(ptr %x) nounwind { -; CHECK-O3-LABEL: atomic_vec4_float: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: subq $24, %rsp -; CHECK-O3-NEXT: movq %rdi, %rsi -; CHECK-O3-NEXT: movq %rsp, %rdx -; CHECK-O3-NEXT: 
movl $16, %edi -; CHECK-O3-NEXT: movl $2, %ecx -; CHECK-O3-NEXT: callq __atomic_load@PLT -; CHECK-O3-NEXT: movaps (%rsp), %xmm0 -; CHECK-O3-NEXT: addq $24, %rsp -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec4_float: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: subq $24, %rsp @@ -728,18 +434,6 @@ define <4 x float> @atomic_vec4_float(ptr %x) nounwind { ; CHECK-AVX-O3-NEXT: addq $24, %rsp ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec4_float: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: subq $24, %rsp -; CHECK-O0-NEXT: movq %rdi, %rsi -; CHECK-O0-NEXT: movl $16, %edi -; CHECK-O0-NEXT: movq %rsp, %rdx -; CHECK-O0-NEXT: movl $2, %ecx -; CHECK-O0-NEXT: callq __atomic_load@PLT -; CHECK-O0-NEXT: movaps (%rsp), %xmm0 -; CHECK-O0-NEXT: addq $24, %rsp -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec4_float: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: subq $24, %rsp @@ -768,21 +462,6 @@ define <4 x float> @atomic_vec4_float(ptr %x) nounwind { } define <8 x double> @atomic_vec8_double(ptr %x) nounwind { -; CHECK-O3-LABEL: atomic_vec8_double: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: subq $72, %rsp -; CHECK-O3-NEXT: movq %rdi, %rsi -; CHECK-O3-NEXT: movq %rsp, %rdx -; CHECK-O3-NEXT: movl $64, %edi -; CHECK-O3-NEXT: movl $2, %ecx -; CHECK-O3-NEXT: callq __atomic_load@PLT -; CHECK-O3-NEXT: movaps (%rsp), %xmm0 -; CHECK-O3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 -; CHECK-O3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2 -; CHECK-O3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm3 -; CHECK-O3-NEXT: addq $72, %rsp -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec8_double: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: subq $72, %rsp @@ -798,20 +477,30 @@ define <8 x double> @atomic_vec8_double(ptr %x) nounwind { ; CHECK-SSE-O3-NEXT: addq $72, %rsp ; CHECK-SSE-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec8_double: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: subq $72, %rsp -; CHECK-O0-NEXT: movq %rdi, %rsi -; CHECK-O0-NEXT: movl $64, %edi -; CHECK-O0-NEXT: movq %rsp, %rdx -; CHECK-O0-NEXT: movl $2, %ecx -; CHECK-O0-NEXT: callq __atomic_load@PLT -; CHECK-O0-NEXT: movapd (%rsp), %xmm0 -; CHECK-O0-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 -; CHECK-O0-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2 -; CHECK-O0-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3 -; CHECK-O0-NEXT: addq $72, %rsp -; CHECK-O0-NEXT: retq +; CHECK-AVX2-O3-LABEL: atomic_vec8_double: +; CHECK-AVX2-O3: # %bb.0: +; CHECK-AVX2-O3-NEXT: subq $72, %rsp +; CHECK-AVX2-O3-NEXT: movq %rdi, %rsi +; CHECK-AVX2-O3-NEXT: movq %rsp, %rdx +; CHECK-AVX2-O3-NEXT: movl $64, %edi +; CHECK-AVX2-O3-NEXT: movl $2, %ecx +; CHECK-AVX2-O3-NEXT: callq __atomic_load@PLT +; CHECK-AVX2-O3-NEXT: vmovups (%rsp), %ymm0 +; CHECK-AVX2-O3-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm1 +; CHECK-AVX2-O3-NEXT: addq $72, %rsp +; CHECK-AVX2-O3-NEXT: retq +; +; CHECK-AVX512-O3-LABEL: atomic_vec8_double: +; CHECK-AVX512-O3: # %bb.0: +; CHECK-AVX512-O3-NEXT: subq $72, %rsp +; CHECK-AVX512-O3-NEXT: movq %rdi, %rsi +; CHECK-AVX512-O3-NEXT: movq %rsp, %rdx +; CHECK-AVX512-O3-NEXT: movl $64, %edi +; CHECK-AVX512-O3-NEXT: movl $2, %ecx +; CHECK-AVX512-O3-NEXT: callq __atomic_load@PLT +; CHECK-AVX512-O3-NEXT: vmovups (%rsp), %zmm0 +; CHECK-AVX512-O3-NEXT: addq $72, %rsp +; CHECK-AVX512-O3-NEXT: retq ; ; CHECK-SSE-O0-LABEL: atomic_vec8_double: ; CHECK-SSE-O0: # %bb.0: @@ -827,24 +516,36 @@ define <8 x double> @atomic_vec8_double(ptr %x) nounwind { ; CHECK-SSE-O0-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3 ; CHECK-SSE-O0-NEXT: addq $72, %rsp ; CHECK-SSE-O0-NEXT: retq +; +; CHECK-AVX2-O0-LABEL: atomic_vec8_double: +; 
CHECK-AVX2-O0: # %bb.0: +; CHECK-AVX2-O0-NEXT: subq $72, %rsp +; CHECK-AVX2-O0-NEXT: movq %rdi, %rsi +; CHECK-AVX2-O0-NEXT: movl $64, %edi +; CHECK-AVX2-O0-NEXT: movq %rsp, %rdx +; CHECK-AVX2-O0-NEXT: movl $2, %ecx +; CHECK-AVX2-O0-NEXT: callq __atomic_load@PLT +; CHECK-AVX2-O0-NEXT: vmovupd (%rsp), %ymm0 +; CHECK-AVX2-O0-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm1 +; CHECK-AVX2-O0-NEXT: addq $72, %rsp +; CHECK-AVX2-O0-NEXT: retq +; +; CHECK-AVX512-O0-LABEL: atomic_vec8_double: +; CHECK-AVX512-O0: # %bb.0: +; CHECK-AVX512-O0-NEXT: subq $72, %rsp +; CHECK-AVX512-O0-NEXT: movq %rdi, %rsi +; CHECK-AVX512-O0-NEXT: movl $64, %edi +; CHECK-AVX512-O0-NEXT: movq %rsp, %rdx +; CHECK-AVX512-O0-NEXT: movl $2, %ecx +; CHECK-AVX512-O0-NEXT: callq __atomic_load@PLT +; CHECK-AVX512-O0-NEXT: vmovupd (%rsp), %zmm0 +; CHECK-AVX512-O0-NEXT: addq $72, %rsp +; CHECK-AVX512-O0-NEXT: retq %ret = load atomic <8 x double>, ptr %x acquire, align 4 ret <8 x double> %ret } define <16 x bfloat> @atomic_vec16_bfloat(ptr %x) nounwind { -; CHECK-O3-LABEL: atomic_vec16_bfloat: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: subq $40, %rsp -; CHECK-O3-NEXT: movq %rdi, %rsi -; CHECK-O3-NEXT: movq %rsp, %rdx -; CHECK-O3-NEXT: movl $32, %edi -; CHECK-O3-NEXT: movl $2, %ecx -; CHECK-O3-NEXT: callq __atomic_load@PLT -; CHECK-O3-NEXT: movaps (%rsp), %xmm0 -; CHECK-O3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 -; CHECK-O3-NEXT: addq $40, %rsp -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec16_bfloat: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: subq $40, %rsp @@ -870,19 +571,6 @@ define <16 x bfloat> @atomic_vec16_bfloat(ptr %x) nounwind { ; CHECK-AVX-O3-NEXT: addq $40, %rsp ; CHECK-AVX-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec16_bfloat: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: subq $40, %rsp -; CHECK-O0-NEXT: movq %rdi, %rsi -; CHECK-O0-NEXT: movl $32, %edi -; CHECK-O0-NEXT: movq %rsp, %rdx -; CHECK-O0-NEXT: movl $2, %ecx -; CHECK-O0-NEXT: callq __atomic_load@PLT -; CHECK-O0-NEXT: movaps (%rsp), %xmm0 -; CHECK-O0-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 -; CHECK-O0-NEXT: addq $40, %rsp -; CHECK-O0-NEXT: retq -; ; CHECK-SSE-O0-LABEL: atomic_vec16_bfloat: ; CHECK-SSE-O0: # %bb.0: ; CHECK-SSE-O0-NEXT: subq $40, %rsp @@ -912,21 +600,6 @@ define <16 x bfloat> @atomic_vec16_bfloat(ptr %x) nounwind { } define <32 x half> @atomic_vec32_half(ptr %x) nounwind { -; CHECK-O3-LABEL: atomic_vec32_half: -; CHECK-O3: # %bb.0: -; CHECK-O3-NEXT: subq $72, %rsp -; CHECK-O3-NEXT: movq %rdi, %rsi -; CHECK-O3-NEXT: movq %rsp, %rdx -; CHECK-O3-NEXT: movl $64, %edi -; CHECK-O3-NEXT: movl $2, %ecx -; CHECK-O3-NEXT: callq __atomic_load@PLT -; CHECK-O3-NEXT: movaps (%rsp), %xmm0 -; CHECK-O3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 -; CHECK-O3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2 -; CHECK-O3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm3 -; CHECK-O3-NEXT: addq $72, %rsp -; CHECK-O3-NEXT: retq -; ; CHECK-SSE-O3-LABEL: atomic_vec32_half: ; CHECK-SSE-O3: # %bb.0: ; CHECK-SSE-O3-NEXT: subq $72, %rsp @@ -942,20 +615,30 @@ define <32 x half> @atomic_vec32_half(ptr %x) nounwind { ; CHECK-SSE-O3-NEXT: addq $72, %rsp ; CHECK-SSE-O3-NEXT: retq ; -; CHECK-O0-LABEL: atomic_vec32_half: -; CHECK-O0: # %bb.0: -; CHECK-O0-NEXT: subq $72, %rsp -; CHECK-O0-NEXT: movq %rdi, %rsi -; CHECK-O0-NEXT: movl $64, %edi -; CHECK-O0-NEXT: movq %rsp, %rdx -; CHECK-O0-NEXT: movl $2, %ecx -; CHECK-O0-NEXT: callq __atomic_load@PLT -; CHECK-O0-NEXT: movaps (%rsp), %xmm0 -; CHECK-O0-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 -; CHECK-O0-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2 -; CHECK-O0-NEXT: movaps {{[0-9]+}}(%rsp), 
%xmm3 -; CHECK-O0-NEXT: addq $72, %rsp -; CHECK-O0-NEXT: retq +; CHECK-AVX2-O3-LABEL: atomic_vec32_half: +; CHECK-AVX2-O3: # %bb.0: +; CHECK-AVX2-O3-NEXT: subq $72, %rsp +; CHECK-AVX2-O3-NEXT: movq %rdi, %rsi +; CHECK-AVX2-O3-NEXT: movq %rsp, %rdx +; CHECK-AVX2-O3-NEXT: movl $64, %edi +; CHECK-AVX2-O3-NEXT: movl $2, %ecx +; CHECK-AVX2-O3-NEXT: callq __atomic_load@PLT +; CHECK-AVX2-O3-NEXT: vmovups (%rsp), %ymm0 +; CHECK-AVX2-O3-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm1 +; CHECK-AVX2-O3-NEXT: addq $72, %rsp +; CHECK-AVX2-O3-NEXT: retq +; +; CHECK-AVX512-O3-LABEL: atomic_vec32_half: +; CHECK-AVX512-O3: # %bb.0: +; CHECK-AVX512-O3-NEXT: subq $72, %rsp +; CHECK-AVX512-O3-NEXT: movq %rdi, %rsi +; CHECK-AVX512-O3-NEXT: movq %rsp, %rdx +; CHECK-AVX512-O3-NEXT: movl $64, %edi +; CHECK-AVX512-O3-NEXT: movl $2, %ecx +; CHECK-AVX512-O3-NEXT: callq __atomic_load@PLT +; CHECK-AVX512-O3-NEXT: vmovups (%rsp), %zmm0 +; CHECK-AVX512-O3-NEXT: addq $72, %rsp +; CHECK-AVX512-O3-NEXT: retq ; ; CHECK-SSE-O0-LABEL: atomic_vec32_half: ; CHECK-SSE-O0: # %bb.0: @@ -971,6 +654,31 @@ define <32 x half> @atomic_vec32_half(ptr %x) nounwind { ; CHECK-SSE-O0-NEXT: movaps {{[0-9]+}}(%rsp), %xmm3 ; CHECK-SSE-O0-NEXT: addq $72, %rsp ; CHECK-SSE-O0-NEXT: retq +; +; CHECK-AVX2-O0-LABEL: atomic_vec32_half: +; CHECK-AVX2-O0: # %bb.0: +; CHECK-AVX2-O0-NEXT: subq $72, %rsp +; CHECK-AVX2-O0-NEXT: movq %rdi, %rsi +; CHECK-AVX2-O0-NEXT: movl $64, %edi +; CHECK-AVX2-O0-NEXT: movq %rsp, %rdx +; CHECK-AVX2-O0-NEXT: movl $2, %ecx +; CHECK-AVX2-O0-NEXT: callq __atomic_load@PLT +; CHECK-AVX2-O0-NEXT: vmovups (%rsp), %ymm0 +; CHECK-AVX2-O0-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm1 +; CHECK-AVX2-O0-NEXT: addq $72, %rsp +; CHECK-AVX2-O0-NEXT: retq +; +; CHECK-AVX512-O0-LABEL: atomic_vec32_half: +; CHECK-AVX512-O0: # %bb.0: +; CHECK-AVX512-O0-NEXT: subq $72, %rsp +; CHECK-AVX512-O0-NEXT: movq %rdi, %rsi +; CHECK-AVX512-O0-NEXT: movl $64, %edi +; CHECK-AVX512-O0-NEXT: movq %rsp, %rdx +; CHECK-AVX512-O0-NEXT: movl $2, %ecx +; CHECK-AVX512-O0-NEXT: callq __atomic_load@PLT +; CHECK-AVX512-O0-NEXT: vmovups (%rsp), %zmm0 +; CHECK-AVX512-O0-NEXT: addq $72, %rsp +; CHECK-AVX512-O0-NEXT: retq %ret = load atomic <32 x half>, ptr %x acquire, align 4 ret <32 x half> %ret } diff --git a/llvm/test/CodeGen/X86/bittest-big-integer.ll b/llvm/test/CodeGen/X86/bittest-big-integer.ll index 19d751d1..cc3dcf3 100644 --- a/llvm/test/CodeGen/X86/bittest-big-integer.ll +++ b/llvm/test/CodeGen/X86/bittest-big-integer.ll @@ -203,24 +203,14 @@ define i1 @init_eq_i32(ptr %word, i32 %position, i1 zeroext %value) nounwind { define i1 @test_ne_i64(ptr %word, i32 %position) nounwind { ; X86-LABEL: test_ne_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1, %edx -; X86-NEXT: xorl %esi, %esi -; X86-NEXT: shldl %cl, %edx, %esi -; X86-NEXT: shll %cl, %edx -; X86-NEXT: testb $32, %cl -; X86-NEXT: je .LBB5_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %edx, %esi -; X86-NEXT: xorl %edx, %edx -; X86-NEXT: .LBB5_2: -; X86-NEXT: andl 4(%eax), %esi -; X86-NEXT: andl (%eax), %edx -; X86-NEXT: orl %esi, %edx -; X86-NEXT: setne %al -; X86-NEXT: popl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl %ecx, %edx +; X86-NEXT: andl $32, %edx +; X86-NEXT: shrl $3, %edx +; X86-NEXT: movl (%eax,%edx), %eax +; X86-NEXT: btl %ecx, %eax +; X86-NEXT: setb %al ; X86-NEXT: retl ; ; X64-LABEL: test_ne_i64: @@ -242,38 +232,20 @@ define i1 @test_ne_i64(ptr %word, i32 %position) nounwind { define i1 
@complement_ne_i64(ptr %word, i32 %position) nounwind { ; X86-LABEL: complement_ne_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1, %eax -; X86-NEXT: xorl %esi, %esi -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: shll %cl, %eax -; X86-NEXT: testb $32, %cl -; X86-NEXT: je .LBB6_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %eax, %esi -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: .LBB6_2: -; X86-NEXT: movl (%edx), %ecx -; X86-NEXT: movl 4(%edx), %edi -; X86-NEXT: movl %edi, %ebx -; X86-NEXT: andl %esi, %ebx -; X86-NEXT: movl %ecx, %ebp -; X86-NEXT: andl %eax, %ebp -; X86-NEXT: xorl %esi, %edi -; X86-NEXT: xorl %eax, %ecx -; X86-NEXT: orl %ebx, %ebp -; X86-NEXT: setne %al -; X86-NEXT: movl %ecx, (%edx) -; X86-NEXT: movl %edi, 4(%edx) +; X86-NEXT: movl %edx, %esi +; X86-NEXT: andl $32, %esi +; X86-NEXT: shrl $3, %esi +; X86-NEXT: movl (%ecx,%esi), %edi +; X86-NEXT: btl %edx, %edi +; X86-NEXT: setb %al +; X86-NEXT: btcl %edx, %edi +; X86-NEXT: movl %edi, (%ecx,%esi) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi -; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl ; ; X64-LABEL: complement_ne_i64: @@ -300,40 +272,20 @@ define i1 @complement_ne_i64(ptr %word, i32 %position) nounwind { define i1 @reset_eq_i64(ptr %word, i32 %position) nounwind { ; X86-LABEL: reset_eq_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1, %esi -; X86-NEXT: xorl %edi, %edi -; X86-NEXT: shldl %cl, %esi, %edi -; X86-NEXT: shll %cl, %esi -; X86-NEXT: testb $32, %cl -; X86-NEXT: je .LBB7_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %esi, %edi -; X86-NEXT: xorl %esi, %esi -; X86-NEXT: .LBB7_2: -; X86-NEXT: movl (%edx), %eax -; X86-NEXT: movl 4(%edx), %ecx -; X86-NEXT: movl %ecx, %ebx -; X86-NEXT: andl %edi, %ebx -; X86-NEXT: notl %edi -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: andl %esi, %ebp -; X86-NEXT: notl %esi -; X86-NEXT: andl %ecx, %edi -; X86-NEXT: andl %eax, %esi -; X86-NEXT: orl %ebx, %ebp -; X86-NEXT: sete %al -; X86-NEXT: movl %esi, (%edx) -; X86-NEXT: movl %edi, 4(%edx) +; X86-NEXT: movl %edx, %esi +; X86-NEXT: andl $32, %esi +; X86-NEXT: shrl $3, %esi +; X86-NEXT: movl (%ecx,%esi), %edi +; X86-NEXT: btl %edx, %edi +; X86-NEXT: setae %al +; X86-NEXT: btrl %edx, %edi +; X86-NEXT: movl %edi, (%ecx,%esi) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi -; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl ; ; X64-LABEL: reset_eq_i64: @@ -361,38 +313,20 @@ define i1 @reset_eq_i64(ptr %word, i32 %position) nounwind { define i1 @set_ne_i64(ptr %word, i32 %position) nounwind { ; X86-LABEL: set_ne_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1, %eax -; X86-NEXT: xorl %esi, %esi -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: shll %cl, %eax -; X86-NEXT: testb $32, %cl -; X86-NEXT: je .LBB8_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %eax, %esi -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: .LBB8_2: -; X86-NEXT: movl (%edx), %ecx -; X86-NEXT: movl 4(%edx), %edi -; X86-NEXT: movl %edi, %ebx -; X86-NEXT: 
andl %esi, %ebx -; X86-NEXT: movl %ecx, %ebp -; X86-NEXT: andl %eax, %ebp -; X86-NEXT: orl %esi, %edi -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: orl %ebx, %ebp -; X86-NEXT: setne %al -; X86-NEXT: movl %ecx, (%edx) -; X86-NEXT: movl %edi, 4(%edx) +; X86-NEXT: movl %edx, %esi +; X86-NEXT: andl $32, %esi +; X86-NEXT: shrl $3, %esi +; X86-NEXT: movl (%ecx,%esi), %edi +; X86-NEXT: btl %edx, %edi +; X86-NEXT: setb %al +; X86-NEXT: btsl %edx, %edi +; X86-NEXT: movl %edi, (%ecx,%esi) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi -; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl ; ; X64-LABEL: set_ne_i64: @@ -419,52 +353,47 @@ define i1 @set_ne_i64(ptr %word, i32 %position) nounwind { define i1 @init_eq_i64(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; X86-LABEL: init_eq_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $1, %eax -; X86-NEXT: xorl %edx, %edx -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: shll %cl, %eax -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl $1, %edx +; X86-NEXT: xorl %esi, %esi +; X86-NEXT: shldl %cl, %edx, %esi +; X86-NEXT: shll %cl, %edx +; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X86-NEXT: xorl %edi, %edi -; X86-NEXT: shldl %cl, %esi, %edi -; X86-NEXT: shll %cl, %esi +; X86-NEXT: shldl %cl, %eax, %edi +; X86-NEXT: shll %cl, %eax ; X86-NEXT: testb $32, %cl ; X86-NEXT: je .LBB9_2 ; X86-NEXT: # %bb.1: -; X86-NEXT: movl %eax, %edx -; X86-NEXT: movl $0, %eax +; X86-NEXT: movl %edx, %esi +; X86-NEXT: movl $0, %edx ; X86-NEXT: .LBB9_2: -; X86-NEXT: movl %edx, %ebx -; X86-NEXT: notl %ebx -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: notl %ebp +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X86-NEXT: notl %esi +; X86-NEXT: notl %edx ; X86-NEXT: je .LBB9_4 ; X86-NEXT: # %bb.3: -; X86-NEXT: movl %esi, %edi -; X86-NEXT: xorl %esi, %esi +; X86-NEXT: movl %eax, %edi +; X86-NEXT: xorl %eax, %eax ; X86-NEXT: .LBB9_4: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl 4(%ecx), %ecx -; X86-NEXT: andl %ecx, %edx -; X86-NEXT: andl %ecx, %ebx -; X86-NEXT: orl %edi, %ebx -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl (%edi), %ecx -; X86-NEXT: andl %ecx, %eax -; X86-NEXT: andl %ecx, %ebp -; X86-NEXT: orl %esi, %ebp -; X86-NEXT: orl %edx, %eax -; X86-NEXT: movl %ebp, (%edi) -; X86-NEXT: movl %ebx, 4(%edi) -; X86-NEXT: sete %al +; X86-NEXT: andl 4(%ebx), %esi +; X86-NEXT: orl %edi, %esi +; X86-NEXT: andl (%ebx), %edx +; X86-NEXT: orl %eax, %edx +; X86-NEXT: movl %ecx, %eax +; X86-NEXT: andl $32, %eax +; X86-NEXT: shrl $3, %eax +; X86-NEXT: movl (%ebx,%eax), %eax +; X86-NEXT: btl %ecx, %eax +; X86-NEXT: setae %al +; X86-NEXT: movl %esi, 4(%ebx) +; X86-NEXT: movl %edx, (%ebx) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl ; ; SSE-LABEL: init_eq_i64: @@ -516,101 +445,25 @@ define i1 @init_eq_i64(ptr %word, i32 %position, i1 zeroext %value) nounwind { define i1 @test_ne_i128(ptr %word, i32 %position) nounwind { ; X86-LABEL: test_ne_i128: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: movl %esp, %ebp -; X86-NEXT: pushl %ebx -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: andl $-16, %esp -; X86-NEXT: subl $48, %esp -; X86-NEXT: movzbl 12(%ebp), %ecx -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $1, {{[0-9]+}}(%esp) -; X86-NEXT: 
movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, (%esp) -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: shrb $3, %al -; X86-NEXT: andb $12, %al -; X86-NEXT: negb %al -; X86-NEXT: movsbl %al, %esi -; X86-NEXT: movl 24(%esp,%esi), %edi -; X86-NEXT: movl 28(%esp,%esi), %eax -; X86-NEXT: shldl %cl, %edi, %eax -; X86-NEXT: movl 16(%esp,%esi), %edx -; X86-NEXT: movl 20(%esp,%esi), %esi -; X86-NEXT: shldl %cl, %esi, %edi -; X86-NEXT: shldl %cl, %edx, %esi -; X86-NEXT: movl 8(%ebp), %ebx -; X86-NEXT: shll %cl, %edx -; X86-NEXT: andl 8(%ebx), %edi -; X86-NEXT: andl (%ebx), %edx -; X86-NEXT: orl %edi, %edx -; X86-NEXT: andl 12(%ebx), %eax -; X86-NEXT: andl 4(%ebx), %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: setne %al -; X86-NEXT: leal -12(%ebp), %esp -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl %ecx, %edx +; X86-NEXT: andl $96, %edx +; X86-NEXT: shrl $3, %edx +; X86-NEXT: movl (%eax,%edx), %eax +; X86-NEXT: btl %ecx, %eax +; X86-NEXT: setb %al ; X86-NEXT: retl ; -; SSE-LABEL: test_ne_i128: -; SSE: # %bb.0: -; SSE-NEXT: movl %esi, %ecx -; SSE-NEXT: movl $1, %eax -; SSE-NEXT: xorl %edx, %edx -; SSE-NEXT: shldq %cl, %rax, %rdx -; SSE-NEXT: xorl %esi, %esi -; SSE-NEXT: shlq %cl, %rax -; SSE-NEXT: testb $64, %cl -; SSE-NEXT: cmovneq %rax, %rdx -; SSE-NEXT: cmovneq %rsi, %rax -; SSE-NEXT: andq 8(%rdi), %rdx -; SSE-NEXT: andq (%rdi), %rax -; SSE-NEXT: orq %rdx, %rax -; SSE-NEXT: setne %al -; SSE-NEXT: retq -; -; AVX2-LABEL: test_ne_i128: -; AVX2: # %bb.0: -; AVX2-NEXT: movl %esi, %ecx -; AVX2-NEXT: xorl %eax, %eax -; AVX2-NEXT: movl $1, %edx -; AVX2-NEXT: xorl %esi, %esi -; AVX2-NEXT: shldq %cl, %rdx, %rsi -; AVX2-NEXT: shlxq %rcx, %rdx, %rdx -; AVX2-NEXT: testb $64, %cl -; AVX2-NEXT: cmovneq %rdx, %rsi -; AVX2-NEXT: cmovneq %rax, %rdx -; AVX2-NEXT: andq 8(%rdi), %rsi -; AVX2-NEXT: andq (%rdi), %rdx -; AVX2-NEXT: orq %rsi, %rdx -; AVX2-NEXT: setne %al -; AVX2-NEXT: retq -; -; AVX512-LABEL: test_ne_i128: -; AVX512: # %bb.0: -; AVX512-NEXT: movl %esi, %ecx -; AVX512-NEXT: movl $1, %eax -; AVX512-NEXT: xorl %edx, %edx -; AVX512-NEXT: shldq %cl, %rax, %rdx -; AVX512-NEXT: xorl %esi, %esi -; AVX512-NEXT: shlxq %rcx, %rax, %rax -; AVX512-NEXT: testb $64, %cl -; AVX512-NEXT: cmovneq %rax, %rdx -; AVX512-NEXT: cmovneq %rsi, %rax -; AVX512-NEXT: andq 8(%rdi), %rdx -; AVX512-NEXT: andq (%rdi), %rax -; AVX512-NEXT: orq %rdx, %rax -; AVX512-NEXT: setne %al -; AVX512-NEXT: retq +; X64-LABEL: test_ne_i128: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %eax +; X64-NEXT: andl $96, %eax +; X64-NEXT: shrl $3, %eax +; X64-NEXT: movl (%rdi,%rax), %eax +; X64-NEXT: btl %esi, %eax +; X64-NEXT: setb %al +; X64-NEXT: retq %rem = and i32 %position, 127 %ofs = zext nneg i32 %rem to i128 %bit = shl nuw i128 1, %ofs @@ -623,124 +476,33 @@ define i1 @test_ne_i128(ptr %word, i32 %position) nounwind { define i1 @complement_ne_i128(ptr %word, i32 %position) nounwind { ; X86-LABEL: complement_ne_i128: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: movl %esp, %ebp -; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: andl $-16, %esp -; X86-NEXT: subl $80, %esp -; X86-NEXT: movzbl 12(%ebp), %ecx -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $1, {{[0-9]+}}(%esp) -; X86-NEXT: movl 
$0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %eax
-; X86-NEXT: movl 56(%esp,%eax), %esi
-; X86-NEXT: movl 60(%esp,%eax), %edx
-; X86-NEXT: shldl %cl, %esi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esp,%eax), %edi
-; X86-NEXT: movl 52(%esp,%eax), %ebx
-; X86-NEXT: shldl %cl, %ebx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %ebx
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: movl 8(%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: movl (%ecx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: andl %edi, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl 12(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 4(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl %edx, 8(%eax)
-; X86-NEXT: movl %esi, 12(%eax)
-; X86-NEXT: movl %edi, (%eax)
-; X86-NEXT: movl %ebx, 4(%eax)
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $96, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btcl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: complement_ne_i128:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl $1, %edx
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: shldq %cl, %rdx, %rsi
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: testb $64, %cl
-; SSE-NEXT: cmovneq %rdx, %rsi
-; SSE-NEXT: cmovneq %rax, %rdx
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r8
-; SSE-NEXT: andq %rsi, %r8
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: andq %rdx, %r9
-; SSE-NEXT: xorq %rcx, %rsi
-; SSE-NEXT: xorq %rax, %rdx
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: setne %al
-; SSE-NEXT: movq %rdx, (%rdi)
-; SSE-NEXT: movq %rsi, 8(%rdi)
-; SSE-NEXT: retq
-;
-; AVX-LABEL: complement_ne_i128:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: movl $1, %edx
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: shldq %cl, %rdx, %rsi
-; AVX-NEXT: shlxq %rcx, %rdx, %rdx
-; AVX-NEXT: testb $64, %cl
-; AVX-NEXT: cmovneq %rdx, %rsi
-; AVX-NEXT: cmovneq %rax, %rdx
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: movq 8(%rdi), %rcx
-; AVX-NEXT: movq %rcx, %r8
-; AVX-NEXT: andq %rsi, %r8
-; AVX-NEXT: movq %rax, %r9
-; AVX-NEXT: andq %rdx, %r9
-; AVX-NEXT: xorq %rcx, %rsi
-; AVX-NEXT: xorq %rax, %rdx
-; AVX-NEXT: orq %r8, %r9
-; AVX-NEXT: setne %al
-; AVX-NEXT: movq %rdx, (%rdi)
-; AVX-NEXT: movq %rsi, 8(%rdi)
-; AVX-NEXT: retq
+; X64-LABEL: complement_ne_i128:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: andl $96, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setb %al
+; X64-NEXT: btcl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
%bit = shl nuw i128 1, %ofs
@@ -755,124 +517,33 @@ define i1 @complement_ne_i128(ptr %word, i32 %position) nounwind {
define i1 @reset_eq_i128(ptr %word, i32 %position) nounwind {
; X86-LABEL: reset_eq_i128:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $80, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %eax
-; X86-NEXT: movl 56(%esp,%eax), %edx
-; X86-NEXT: movl 60(%esp,%eax), %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esp,%eax), %esi
-; X86-NEXT: movl 52(%esp,%eax), %edi
-; X86-NEXT: shldl %cl, %edi, %edx
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movl 8(%ebx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %eax
-; X86-NEXT: movl (%ebx), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%ebx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl %edi, %ecx
-; X86-NEXT: movl 4(%ebx), %ebx
-; X86-NEXT: andl %ebx, %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: notl %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: notl %ecx
-; X86-NEXT: andl %ebx, %ecx
-; X86-NEXT: notl %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl 8(%ebp), %edi
-; X86-NEXT: movl %edx, 8(%edi)
-; X86-NEXT: movl %eax, 12(%edi)
-; X86-NEXT: movl %esi, (%edi)
-; X86-NEXT: movl %ecx, 4(%edi)
-; X86-NEXT: sete %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $96, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setae %al
+; X86-NEXT: btrl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: reset_eq_i128:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl $1, %edx
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: shldq %cl, %rdx, %rsi
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: testb $64, %cl
-; SSE-NEXT: cmovneq %rdx, %rsi
-; SSE-NEXT: cmovneq %rax, %rdx
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r8
-; SSE-NEXT: andq %rsi, %r8
-; SSE-NEXT: notq %rsi
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: andq %rdx, %r9
-; SSE-NEXT: notq %rdx
-; SSE-NEXT: andq %rcx, %rsi
-; SSE-NEXT: andq %rax, %rdx
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: sete %al
-; SSE-NEXT: movq %rdx, (%rdi)
-; SSE-NEXT: movq %rsi, 8(%rdi)
-; SSE-NEXT: retq
-;
-; AVX-LABEL: reset_eq_i128:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: movl $1, %edx
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: shldq %cl, %rdx, %rsi
-; AVX-NEXT: shlxq %rcx, %rdx, %rdx
-; AVX-NEXT: testb $64, %cl
-; AVX-NEXT: cmovneq %rdx, %rsi
-; AVX-NEXT: cmovneq %rax, %rdx
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: movq 8(%rdi), %rcx
-; AVX-NEXT: andnq %rcx, %rsi, %r8
-; AVX-NEXT: andq %rsi, %rcx
-; AVX-NEXT: andnq %rax, %rdx, %rsi
-; AVX-NEXT: andq %rdx, %rax
-; AVX-NEXT: orq %rcx, %rax
-; AVX-NEXT: sete %al
-; AVX-NEXT: movq %rsi, (%rdi)
-; AVX-NEXT: movq %r8, 8(%rdi)
-; AVX-NEXT: retq
+; X64-LABEL: reset_eq_i128:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: andl $96, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setae %al
+; X64-NEXT: btrl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
%bit = shl nuw i128 1, %ofs
@@ -888,124 +559,33 @@ define i1 @reset_eq_i128(ptr %word, i32 %position) nounwind {
define i1 @set_ne_i128(ptr %word, i32 %position) nounwind {
; X86-LABEL: set_ne_i128:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $80, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %eax
-; X86-NEXT: movl 56(%esp,%eax), %esi
-; X86-NEXT: movl 60(%esp,%eax), %edx
-; X86-NEXT: shldl %cl, %esi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esp,%eax), %edi
-; X86-NEXT: movl 52(%esp,%eax), %ebx
-; X86-NEXT: shldl %cl, %ebx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %ebx
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: movl 8(%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: movl (%ecx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: andl %edi, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl 12(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 4(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl %edx, 8(%eax)
-; X86-NEXT: movl %esi, 12(%eax)
-; X86-NEXT: movl %edi, (%eax)
-; X86-NEXT: movl %ebx, 4(%eax)
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $96, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btsl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: set_ne_i128:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl $1, %edx
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: shldq %cl, %rdx, %rsi
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: testb $64, %cl
-; SSE-NEXT: cmovneq %rdx, %rsi
-; SSE-NEXT: cmovneq %rax, %rdx
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r8
-; SSE-NEXT: andq %rsi, %r8
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: andq %rdx, %r9
-; SSE-NEXT: orq %rcx, %rsi
-; SSE-NEXT: orq %rax, %rdx
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: setne %al
-; SSE-NEXT: movq %rdx, (%rdi)
-; SSE-NEXT: movq %rsi, 8(%rdi)
-; SSE-NEXT: retq
-;
-; AVX-LABEL: set_ne_i128:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: movl $1, %edx
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: shldq %cl, %rdx, %rsi
-; AVX-NEXT: shlxq %rcx, %rdx, %rdx
-; AVX-NEXT: testb $64, %cl
-; AVX-NEXT: cmovneq %rdx, %rsi
-; AVX-NEXT: cmovneq %rax, %rdx
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: movq 8(%rdi), %rcx
-; AVX-NEXT: movq %rcx, %r8
-; AVX-NEXT: andq %rsi, %r8
-; AVX-NEXT: movq %rax, %r9
-; AVX-NEXT: andq %rdx, %r9
-; AVX-NEXT: orq %rcx, %rsi
-; AVX-NEXT: orq %rax, %rdx
-; AVX-NEXT: orq %r8, %r9
-; AVX-NEXT: setne %al
-; AVX-NEXT: movq %rdx, (%rdi)
-; AVX-NEXT: movq %rsi, 8(%rdi)
-; AVX-NEXT: retq
+; X64-LABEL: set_ne_i128:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: andl $96, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setb %al
+; X64-NEXT: btsl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
%bit = shl nuw i128 1, %ofs
@@ -1026,9 +606,9 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $128, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movzbl 16(%ebp), %eax
+; X86-NEXT: subl $96, %esp
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: movzbl 16(%ebp), %ebx
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -1037,25 +617,29 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %edx
-; X86-NEXT: shrb $3, %dl
-; X86-NEXT: andb $12, %dl
-; X86-NEXT: negb %dl
-; X86-NEXT: movsbl %dl, %esi
-; X86-NEXT: movl 64(%esp,%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 68(%esp,%esi), %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shrb $3, %al
+; X86-NEXT: andb $12, %al
+; X86-NEXT: negb %al
+; X86-NEXT: movsbl %al, %edi
+; X86-NEXT: movl 72(%esp,%edi), %edx
+; X86-NEXT: movl 76(%esp,%edi), %esi
+; X86-NEXT: movzbl %bl, %eax
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 64(%esp,%edi), %ebx
+; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X86-NEXT: movl 68(%esp,%edi), %ebx
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: shldl %cl, %edx, %esi
+; X86-NEXT: shldl %cl, %ebx, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 72(%esp,%esi), %ebx
+; X86-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NEXT: shldl %cl, %eax, %ebx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: notl %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movzbl %al, %eax
-; X86-NEXT: movl 76(%esp,%esi), %edi
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ebx, %eax
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: shldl %cl, %ebx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shll %cl, %edx
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -1063,72 +647,53 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%esi), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esi), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %edi, %esi
+; X86-NEXT: movl 40(%esp,%eax), %edi
+; X86-NEXT: movl 44(%esp,%eax), %esi
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shldl %cl, %edi, %esi
; X86-NEXT: movl 8(%ebp), %ecx
-; X86-NEXT: movl 12(%ecx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %edi
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: movl 4(%ecx), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %ebx
-; X86-NEXT: orl %edi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: notl %ecx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl 100(%esp,%ecx), %edi
-; X86-NEXT: movl 104(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, %ebx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: shldl %cl, %edi, %ebx
-; X86-NEXT: orl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: notl %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl 108(%esp,%ebx), %ebx
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: andl 12(%ecx), %eax
+; X86-NEXT: orl %esi, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: notl %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl 96(%esp,%ebx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: orl %ebx, %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: movl 36(%esp,%esi), %esi
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: movl 8(%ebp), %edx
+; X86-NEXT: andl 8(%edx), %eax
+; X86-NEXT: orl %edi, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl 32(%esp,%eax), %eax
+; X86-NEXT: shldl %cl, %eax, %esi
+; X86-NEXT: movl 8(%ebp), %edi
+; X86-NEXT: andl 4(%edi), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %ebx, %edi
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl 8(%ebp), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 8(%ecx)
-; X86-NEXT: movl %esi, 12(%ecx)
-; X86-NEXT: movl %eax, (%ecx)
-; X86-NEXT: movl %edx, 4(%ecx)
-; X86-NEXT: sete %al
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: andl (%edi), %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: andl $96, %eax
+; X86-NEXT: shrl $3, %eax
+; X86-NEXT: movl (%edi,%eax), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 12(%edi)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 8(%edi)
+; X86-NEXT: movl %ebx, 4(%edi)
+; X86-NEXT: movl %edx, (%edi)
+; X86-NEXT: setae %al
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
@@ -1151,86 +716,84 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; SSE-NEXT: testb $64, %cl
; SSE-NEXT: cmovneq %rsi, %r8
; SSE-NEXT: cmovneq %r9, %rsi
+; SSE-NEXT: notq %r8
; SSE-NEXT: cmovneq %rax, %rdx
; SSE-NEXT: cmovneq %r9, %rax
-; SSE-NEXT: movq (%rdi), %rcx
-; SSE-NEXT: movq 8(%rdi), %r9
-; SSE-NEXT: movq %r9, %r10
-; SSE-NEXT: andq %r8, %r10
-; SSE-NEXT: notq %r8
-; SSE-NEXT: movq %rcx, %r11
-; SSE-NEXT: andq %rsi, %r11
; SSE-NEXT: notq %rsi
-; SSE-NEXT: andq %r9, %r8
+; SSE-NEXT: andq 8(%rdi), %r8
; SSE-NEXT: orq %rdx, %r8
-; SSE-NEXT: andq %rcx, %rsi
+; SSE-NEXT: andq (%rdi), %rsi
; SSE-NEXT: orq %rax, %rsi
-; SSE-NEXT: orq %r10, %r11
-; SSE-NEXT: sete %al
-; SSE-NEXT: movq %rsi, (%rdi)
+; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: andl $96, %eax
+; SSE-NEXT: shrl $3, %eax
+; SSE-NEXT: movl (%rdi,%rax), %eax
+; SSE-NEXT: btl %ecx, %eax
+; SSE-NEXT: setae %al
; SSE-NEXT: movq %r8, 8(%rdi)
+; SSE-NEXT: movq %rsi, (%rdi)
; SSE-NEXT: retq
;
; AVX2-LABEL: init_eq_i128:
; AVX2: # %bb.0:
; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: movl $1, %esi
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: shldq %cl, %rsi, %rax
-; AVX2-NEXT: xorl %r8d, %r8d
+; AVX2-NEXT: movl $1, %eax
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: shldq %cl, %rax, %rsi
; AVX2-NEXT: movl %edx, %edx
+; AVX2-NEXT: xorl %r8d, %r8d
+; AVX2-NEXT: shldq %cl, %rdx, %r8
; AVX2-NEXT: xorl %r9d, %r9d
-; AVX2-NEXT: shldq %cl, %rdx, %r9
-; AVX2-NEXT: shlxq %rcx, %rsi, %rsi
+; AVX2-NEXT: shlxq %rcx, %rax, %rax
; AVX2-NEXT: testb $64, %cl
-; AVX2-NEXT: cmovneq %rsi, %rax
-; AVX2-NEXT: cmovneq %r8, %rsi
-; AVX2-NEXT: shlxq %rcx, %rdx, %rcx
-; AVX2-NEXT: cmovneq %rcx, %r9
-; AVX2-NEXT: cmovneq %r8, %rcx
-; AVX2-NEXT: movq (%rdi), %rdx
-; AVX2-NEXT: movq 8(%rdi), %r8
-; AVX2-NEXT: andnq %r8, %rax, %r10
-; AVX2-NEXT: andq %rax, %r8
-; AVX2-NEXT: andnq %rdx, %rsi, %r11
-; AVX2-NEXT: andq %rsi, %rdx
-; AVX2-NEXT: orq %r9, %r10
-; AVX2-NEXT: orq %rcx, %r11
-; AVX2-NEXT: orq %r8, %rdx
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: movq %r11, (%rdi)
-; AVX2-NEXT: movq %r10, 8(%rdi)
+; AVX2-NEXT: cmovneq %rax, %rsi
+; AVX2-NEXT: cmovneq %r9, %rax
+; AVX2-NEXT: shlxq %rcx, %rdx, %rdx
+; AVX2-NEXT: cmovneq %rdx, %r8
+; AVX2-NEXT: cmovneq %r9, %rdx
+; AVX2-NEXT: andnq 8(%rdi), %rsi, %rsi
+; AVX2-NEXT: orq %r8, %rsi
+; AVX2-NEXT: andnq (%rdi), %rax, %r8
+; AVX2-NEXT: orq %rdx, %r8
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: andl $96, %eax
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: movl (%rdi,%rax), %eax
+; AVX2-NEXT: btl %ecx, %eax
+; AVX2-NEXT: setae %al
+; AVX2-NEXT: movq %rsi, 8(%rdi)
+; AVX2-NEXT: movq %r8, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: init_eq_i128:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: movl $1, %esi
+; AVX512-NEXT: movl $1, %eax
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: shldq %cl, %rax, %rsi
; AVX512-NEXT: xorl %r8d, %r8d
-; AVX512-NEXT: shldq %cl, %rsi, %r8
-; AVX512-NEXT: shlxq %rcx, %rsi, %rsi
+; AVX512-NEXT: shlxq %rcx, %rax, %rax
; AVX512-NEXT: movl %edx, %edx
; AVX512-NEXT: xorl %r9d, %r9d
; AVX512-NEXT: shldq %cl, %rdx, %r9
; AVX512-NEXT: testb $64, %cl
-; AVX512-NEXT: cmovneq %rsi, %r8
; AVX512-NEXT: cmovneq %rax, %rsi
-; AVX512-NEXT: shlxq %rcx, %rdx, %rcx
-; AVX512-NEXT: cmovneq %rcx, %r9
-; AVX512-NEXT: cmovneq %rax, %rcx
-; AVX512-NEXT: movq (%rdi), %rax
-; AVX512-NEXT: movq 8(%rdi), %rdx
-; AVX512-NEXT: andnq %rdx, %r8, %r10
-; AVX512-NEXT: andq %r8, %rdx
-; AVX512-NEXT: andnq %rax, %rsi, %r8
-; AVX512-NEXT: andq %rsi, %rax
-; AVX512-NEXT: orq %r9, %r10
-; AVX512-NEXT: orq %rcx, %r8
-; AVX512-NEXT: orq %rdx, %rax
-; AVX512-NEXT: sete %al
+; AVX512-NEXT: cmovneq %r8, %rax
+; AVX512-NEXT: shlxq %rcx, %rdx, %rdx
+; AVX512-NEXT: cmovneq %rdx, %r9
+; AVX512-NEXT: cmovneq %r8, %rdx
+; AVX512-NEXT: andnq 8(%rdi), %rsi, %rsi
+; AVX512-NEXT: orq %r9, %rsi
+; AVX512-NEXT: andnq (%rdi), %rax, %r8
+; AVX512-NEXT: orq %rdx, %r8
+; AVX512-NEXT: movl %ecx, %eax
+; AVX512-NEXT: andl $96, %eax
+; AVX512-NEXT: shrl $3, %eax
+; AVX512-NEXT: movl (%rdi,%rax), %eax
+; AVX512-NEXT: btl %ecx, %eax
+; AVX512-NEXT: setae %al
+; AVX512-NEXT: movq %rsi, 8(%rdi)
; AVX512-NEXT: movq %r8, (%rdi)
-; AVX512-NEXT: movq %r10, 8(%rdi)
; AVX512-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
@@ -1252,344 +815,25 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
define i1 @test_ne_i512(ptr %word, i32 %position) nounwind {
; X86-LABEL: test_ne_i512:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $224, %esp
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrl $3, %eax
-; X86-NEXT: andl $60, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
-; X86-NEXT: subl %eax, %edx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 24(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $31, %ecx
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%edx), %eax
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%edx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%edx), %edi
-; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%edx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %esi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 52(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 4(%edx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: andl 40(%ebx), %eax
-; X86-NEXT: andl 8(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 56(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 24(%ebx), %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl %esi, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %ebx, %edi
-; X86-NEXT: andl 44(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 12(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl %esi, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 60(%edi), %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 28(%edi), %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: orl %ebx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%edx), %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: negl %edx
-; X86-NEXT: movl 192(%esp,%edx), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %ebx, %edx
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: andl 32(%ebx), %ecx
-; X86-NEXT: andl (%ebx), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: andl 16(%ebx), %edi
-; X86-NEXT: andl 48(%ebx), %edx
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 36(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 4(%ebx), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 20(%ebx), %ecx
-; X86-NEXT: andl 52(%ebx), %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: andl $60, %edx
+; X86-NEXT: movl (%eax,%edx), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
-; SSE-LABEL: test_ne_i512:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: negl %esi
-; SSE-NEXT: movslq %esi, %rbx
-; SSE-NEXT: movq -48(%rsp,%rbx), %rdx
-; SSE-NEXT: movq -40(%rsp,%rbx), %r14
-; SSE-NEXT: movq %r14, %rax
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq -16(%rsp,%rbx), %r11
-; SSE-NEXT: movq -8(%rsp,%rbx), %r10
-; SSE-NEXT: shldq %cl, %r11, %r10
-; SSE-NEXT: movq -32(%rsp,%rbx), %r9
-; SSE-NEXT: movq -24(%rsp,%rbx), %r15
-; SSE-NEXT: movq %r15, %r8
-; SSE-NEXT: shldq %cl, %r9, %r8
-; SSE-NEXT: movq -56(%rsp,%rbx), %rsi
-; SSE-NEXT: shldq %cl, %rsi, %rdx
-; SSE-NEXT: shldq %cl, %r15, %r11
-; SSE-NEXT: shldq %cl, %r14, %r9
-; SSE-NEXT: movq -64(%rsp,%rbx), %rbx
-; SSE-NEXT: shldq %cl, %rbx, %rsi
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rbx
-; SSE-NEXT: andq 32(%rdi), %r9
-; SSE-NEXT: andq 48(%rdi), %r11
-; SSE-NEXT: andq 16(%rdi), %rdx
-; SSE-NEXT: orq %r11, %rdx
-; SSE-NEXT: andq 40(%rdi), %r8
-; SSE-NEXT: andq 56(%rdi), %r10
-; SSE-NEXT: andq 24(%rdi), %rax
-; SSE-NEXT: orq %r10, %rax
-; SSE-NEXT: andq (%rdi), %rbx
-; SSE-NEXT: orq %r9, %rbx
-; SSE-NEXT: orq %rdx, %rbx
-; SSE-NEXT: andq 8(%rdi), %rsi
-; SSE-NEXT: orq %r8, %rsi
-; SSE-NEXT: orq %rax, %rsi
-; SSE-NEXT: orq %rbx, %rsi
-; SSE-NEXT: setne %al
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: test_ne_i512:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0]
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: andl $63, %ecx
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: negl %esi
-; AVX2-NEXT: movslq %esi, %rsi
-; AVX2-NEXT: movq -48(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq -40(%rsp,%rsi), %rbx
-; AVX2-NEXT: movq %rbx, %rax
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq -16(%rsp,%rsi), %r11
-; AVX2-NEXT: movq -8(%rsp,%rsi), %r10
-; AVX2-NEXT: shldq %cl, %r11, %r10
-; AVX2-NEXT: movq -32(%rsp,%rsi), %r9
-; AVX2-NEXT: movq -24(%rsp,%rsi), %r14
-; AVX2-NEXT: movq %r14, %r8
-; AVX2-NEXT: shldq %cl, %r9, %r8
-; AVX2-NEXT: movq -64(%rsp,%rsi), %r15
-; AVX2-NEXT: movq -56(%rsp,%rsi), %rsi
-; AVX2-NEXT: shldq %cl, %rsi, %rdx
-; AVX2-NEXT: shldq %cl, %r14, %r11
-; AVX2-NEXT: shldq %cl, %rbx, %r9
-; AVX2-NEXT: shldq %cl, %r15, %rsi
-; AVX2-NEXT: shlxq %rcx, %r15, %rcx
-; AVX2-NEXT: andq 32(%rdi), %r9
-; AVX2-NEXT: andq 48(%rdi), %r11
-; AVX2-NEXT: andq 16(%rdi), %rdx
-; AVX2-NEXT: andq 40(%rdi), %r8
-; AVX2-NEXT: andq 56(%rdi), %r10
-; AVX2-NEXT: andq 24(%rdi), %rax
-; AVX2-NEXT: orq %r11, %rdx
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: andq (%rdi), %rcx
-; AVX2-NEXT: orq %r9, %rcx
-; AVX2-NEXT: orq %rdx, %rcx
-; AVX2-NEXT: andq 8(%rdi), %rsi
-; AVX2-NEXT: orq %r8, %rsi
-; AVX2-NEXT: orq %rax, %rsi
-; AVX2-NEXT: orq %rcx, %rsi
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_ne_i512:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0]
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: negl %esi
-; AVX512-NEXT: movslq %esi, %rbx
-; AVX512-NEXT: movq -48(%rsp,%rbx), %rdx
-; AVX512-NEXT: movq -40(%rsp,%rbx), %r14
-; AVX512-NEXT: movq %r14, %rax
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq -16(%rsp,%rbx), %r11
-; AVX512-NEXT: movq -8(%rsp,%rbx), %r10
-; AVX512-NEXT: shldq %cl, %r11, %r10
-; AVX512-NEXT: movq -32(%rsp,%rbx), %r9
-; AVX512-NEXT: movq -24(%rsp,%rbx), %r15
-; AVX512-NEXT: movq %r15, %r8
-; AVX512-NEXT: shldq %cl, %r9, %r8
-; AVX512-NEXT: movq -56(%rsp,%rbx), %rsi
-; AVX512-NEXT: shldq %cl, %rsi, %rdx
-; AVX512-NEXT: shldq %cl, %r15, %r11
-; AVX512-NEXT: shldq %cl, %r14, %r9
-; AVX512-NEXT: movq -64(%rsp,%rbx), %rbx
-; AVX512-NEXT: shldq %cl, %rbx, %rsi
-; AVX512-NEXT: shlxq %rcx, %rbx, %rcx
-; AVX512-NEXT: andq 32(%rdi), %r9
-; AVX512-NEXT: andq 48(%rdi), %r11
-; AVX512-NEXT: andq 16(%rdi), %rdx
-; AVX512-NEXT: andq 40(%rdi), %r8
-; AVX512-NEXT: andq 56(%rdi), %r10
-; AVX512-NEXT: andq 24(%rdi), %rax
-; AVX512-NEXT: orq %r11, %rdx
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: andq (%rdi), %rcx
-; AVX512-NEXT: orq %r9, %rcx
-; AVX512-NEXT: orq %rdx, %rcx
-; AVX512-NEXT: andq 8(%rdi), %rsi
-; AVX512-NEXT: orq %r8, %rsi
-; AVX512-NEXT: orq %rax, %rsi
-; AVX512-NEXT: orq %rcx, %rsi
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: test_ne_i512:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: shrl $3, %eax
+; X64-NEXT: andl $60, %eax
+; X64-NEXT: movl (%rdi,%rax), %eax
+; X64-NEXT: btl %esi, %eax
+; X64-NEXT: setb %al
+; X64-NEXT: retq
%rem = and i32 %position, 511
%ofs = zext nneg i32 %rem to i512
%bit = shl nuw i512 1, %ofs
@@ -1602,572 +846,33 @@ define i1 @test_ne_i512(ptr %word, i32 %position) nounwind {
define i1 @complement_ne_i512(ptr %word, i32 %position) nounwind {
; X86-LABEL: complement_ne_i512:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $272, %esp # imm = 0x110
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrl $3, %eax
-; X86-NEXT: andl $60, %eax
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
-; X86-NEXT: subl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 24(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $31, %ecx
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%edx), %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%edx), %ebx
-; X86-NEXT: movl %ebx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%edx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 52(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: movl 40(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: movl 8(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: movl 56(%edx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edi, %ebx
-; X86-NEXT: movl 24(%edx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%eax), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl 12(%eax), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: orl %esi, %ebx
-; X86-NEXT: movl 60(%eax), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 28(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl (%eax), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NEXT: negl %eax
-; X86-NEXT: movl 240(%esp,%eax), %esi
-; X86-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl 32(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edi, %eax
-; X86-NEXT: movl (%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl 16(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: movl 48(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 36(%esi), %ebx
-; X86-NEXT: movl %ebx, %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 4(%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: movl 20(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl %esi, %edi
-; X86-NEXT: movl 52(%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: orl %edi, %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: xorl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, (%esp) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: movl %ebx, 60(%edx)
-; X86-NEXT: movl %edi, 56(%edx)
-; X86-NEXT: movl %ecx, 52(%edx)
-; X86-NEXT: movl %esi, 44(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 40(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 36(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 32(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 28(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 24(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 20(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 16(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 12(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 8(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 4(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, (%edx)
-; X86-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 48(%edx)
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: andl $60, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btcl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: complement_ne_i512:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rbp
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %r13
-; SSE-NEXT: pushq %r12
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: subq $56, %rsp
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: negl %esi
-; SSE-NEXT: movslq %esi, %rbx
-; SSE-NEXT: movq (%rsp,%rbx), %rsi
-; SSE-NEXT: movq 8(%rsp,%rbx), %r14
-; SSE-NEXT: movq %r14, %rax
-; SSE-NEXT: shldq %cl, %rsi, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 32(%rsp,%rbx), %r8
-; SSE-NEXT: movq 40(%rsp,%rbx), %rbp
-; SSE-NEXT: shldq %cl, %r8, %rbp
-; SSE-NEXT: movq 16(%rsp,%rbx), %r9
-; SSE-NEXT: movq 24(%rsp,%rbx), %r15
-; SSE-NEXT: movq %r15, %r10
-; SSE-NEXT: shldq %cl, %r9, %r10
-; SSE-NEXT: movq -8(%rsp,%rbx), %r11
-; SSE-NEXT: shldq %cl, %r11, %rsi
-; SSE-NEXT: shldq %cl, %r15, %r8
-; SSE-NEXT: shldq %cl, %r14, %r9
-; SSE-NEXT: movq -16(%rsp,%rbx), %rbx
-; SSE-NEXT: shldq %cl, %rbx, %r11
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rbx
-; SSE-NEXT: movq 24(%rdi), %r15
-; SSE-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 56(%rdi), %rcx
-; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 16(%rdi), %r12
-; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 48(%rdi), %r13
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %r8, %r13
-; SSE-NEXT: andq %rsi, %r12
-; SSE-NEXT: orq %r13, %r12
-; SSE-NEXT: movq %rcx, %r13
-; SSE-NEXT: andq %rbp, %r13
-; SSE-NEXT: andq %rax, %r15
-; SSE-NEXT: orq %r13, %r15
-; SSE-NEXT: movq 32(%rdi), %r14
-; SSE-NEXT: movq %r14, %rcx
-; SSE-NEXT: andq %r9, %rcx
-; SSE-NEXT: movq (%rdi), %r13
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rbx, %r13
-; SSE-NEXT: orq %rcx, %r13
-; SSE-NEXT: orq %r12, %r13
-; SSE-NEXT: movq 40(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r12
-; SSE-NEXT: andq %r10, %r12
-; SSE-NEXT: movq 8(%rdi), %rdx
-; SSE-NEXT: movq %rdx, %rax
-; SSE-NEXT: andq %r11, %rax
-; SSE-NEXT: orq %r12, %rax
-; SSE-NEXT: orq %r15, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE-NEXT: xorq %rcx, %r10
-; SSE-NEXT: xorq %r14, %r9
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; SSE-NEXT: xorq %rdx, %r11
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; SSE-NEXT: orq %r13, %rax
-; SSE-NEXT: movq %r8, 48(%rdi)
-; SSE-NEXT: movq %rbp, 56(%rdi)
-; SSE-NEXT: movq %r9, 32(%rdi)
-; SSE-NEXT: movq %r10, 40(%rdi)
-; SSE-NEXT: movq %rsi, 16(%rdi)
-; SSE-NEXT: movq %r15, 24(%rdi)
-; SSE-NEXT: movq %rbx, (%rdi)
-; SSE-NEXT: movq %r11, 8(%rdi)
-; SSE-NEXT: setne %al
-; SSE-NEXT: addq $56, %rsp
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r12
-; SSE-NEXT: popq %r13
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: popq %rbp
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: complement_ne_i512:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $72, %rsp
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0]
-; AVX2-NEXT: vmovups %ymm0, (%rsp)
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: andl $63, %ecx
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: negl %esi
-; AVX2-NEXT: movslq %esi, %rbx
-; AVX2-NEXT: movq 16(%rsp,%rbx), %rsi
-; AVX2-NEXT: movq 24(%rsp,%rbx), %rbp
-; AVX2-NEXT: movq %rbp, %rax
-; AVX2-NEXT: shldq %cl, %rsi, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 48(%rsp,%rbx), %r8
-; AVX2-NEXT: movq 56(%rsp,%rbx), %r13
-; AVX2-NEXT: shldq %cl, %r8, %r13
-; AVX2-NEXT: movq 32(%rsp,%rbx), %r9
-; AVX2-NEXT: movq 40(%rsp,%rbx), %r14
-; AVX2-NEXT: movq %r14, %r10
-; AVX2-NEXT: shldq %cl, %r9, %r10
-; AVX2-NEXT: movq 8(%rsp,%rbx), %r11
-; AVX2-NEXT: shldq %cl, %r11, %rsi
-; AVX2-NEXT: shldq %cl, %r14, %r8
-; AVX2-NEXT: movq 16(%rdi), %r12
-; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 48(%rdi), %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r8, %r14
-; AVX2-NEXT: andq %rsi, %r12
-; AVX2-NEXT: orq %r14, %r12
-; AVX2-NEXT: movq 56(%rdi), %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r13, %r15
-; AVX2-NEXT: movq 24(%rdi), %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %rax, %r14
-; AVX2-NEXT: orq %r15, %r14
-; AVX2-NEXT: shldq %cl, %rbp, %r9
-; AVX2-NEXT: movq (%rsp,%rbx), %rdx
-; AVX2-NEXT: movq 32(%rdi), %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r9, %r15
-; AVX2-NEXT: shlxq %rcx, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq (%rdi), %rbx
-; AVX2-NEXT: movq %rbx, %rbp
-; AVX2-NEXT: andq %rax, %rbp
-; AVX2-NEXT: orq %r15, %rbp
-; AVX2-NEXT: orq %r12, %rbp
-; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX2-NEXT: shldq %cl, %rdx, %r11
-; AVX2-NEXT: movq 40(%rdi), %rax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: andq %r10, %rcx
-; AVX2-NEXT: movq 8(%rdi), %r15
-; AVX2-NEXT: movq %r15, %r12
-; AVX2-NEXT: andq %r11, %r12
-; AVX2-NEXT: orq %rcx, %r12
-; AVX2-NEXT: orq %r14, %r12
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX2-NEXT: xorq %rax, %r10
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX2-NEXT: xorq %r15, %r11
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX2-NEXT: orq %rbp, %r12
-; AVX2-NEXT: movq %r8, 48(%rdi)
-; AVX2-NEXT: movq %r13, 56(%rdi)
-; AVX2-NEXT: movq %r9, 32(%rdi)
-; AVX2-NEXT: movq %r10, 40(%rdi)
-; AVX2-NEXT: movq %rsi, 16(%rdi)
-; AVX2-NEXT: movq %rcx, 24(%rdi)
-; AVX2-NEXT: movq %rbx, (%rdi)
-; AVX2-NEXT: movq %r11, 8(%rdi)
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: addq $72, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: complement_ne_i512:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: subq $72, %rsp
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0]
-; AVX512-NEXT: vmovups %ymm0, (%rsp)
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: negl %esi
-; AVX512-NEXT: movslq %esi, %rbx
-; AVX512-NEXT: movq 16(%rsp,%rbx), %rsi
-; AVX512-NEXT: movq 24(%rsp,%rbx), %rbp
-; AVX512-NEXT: movq %rbp, %rax
-; AVX512-NEXT: shldq %cl, %rsi, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 48(%rsp,%rbx), %r8
-; AVX512-NEXT: movq 56(%rsp,%rbx), %r13
-; AVX512-NEXT: shldq %cl, %r8, %r13
-; AVX512-NEXT: movq 32(%rsp,%rbx), %r9
-; AVX512-NEXT: movq 40(%rsp,%rbx), %r14
-; AVX512-NEXT: movq %r14, %r10
-; AVX512-NEXT: shldq %cl, %r9, %r10
-; AVX512-NEXT: movq 8(%rsp,%rbx), %r11
-; AVX512-NEXT: shldq %cl, %r11, %rsi
-; AVX512-NEXT: shldq %cl, %r14, %r8
-; AVX512-NEXT: movq 16(%rdi), %r12
-; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 48(%rdi), %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r8, %r14
-; AVX512-NEXT: andq %rsi, %r12
-; AVX512-NEXT: orq %r14, %r12
-; AVX512-NEXT: movq 56(%rdi), %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r13, %r15
-; AVX512-NEXT: movq 24(%rdi), %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %rax, %r14
-; AVX512-NEXT: orq %r15, %r14
-; AVX512-NEXT: shldq %cl, %rbp, %r9
-; AVX512-NEXT: movq (%rsp,%rbx), %rdx
-; AVX512-NEXT: movq 32(%rdi), %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r9, %r15
-; AVX512-NEXT: shlxq %rcx, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq (%rdi), %rbx
-; AVX512-NEXT: movq %rbx, %rbp
-; AVX512-NEXT: andq %rax, %rbp
-; AVX512-NEXT: orq %r15, %rbp
-; AVX512-NEXT: orq %r12, %rbp
-; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX512-NEXT: shldq %cl, %rdx, %r11
-; AVX512-NEXT: movq 40(%rdi), %rax
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: andq %r10, %rcx
-; AVX512-NEXT: movq 8(%rdi), %r15
-; AVX512-NEXT: movq %r15, %r12
-; AVX512-NEXT: andq %r11, %r12
-; AVX512-NEXT: orq %rcx, %r12
-; AVX512-NEXT: orq %r14, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX512-NEXT: xorq %rax, %r10
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX512-NEXT: xorq %r15, %r11
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX512-NEXT: orq %rbp, %r12
-; AVX512-NEXT: movq %r8, 48(%rdi)
-; AVX512-NEXT: movq %r13, 56(%rdi)
-; AVX512-NEXT: movq %r9, 32(%rdi)
-; AVX512-NEXT: movq %r10, 40(%rdi)
-; AVX512-NEXT: movq %rsi, 16(%rdi)
-; AVX512-NEXT: movq %rcx, 24(%rdi)
-; AVX512-NEXT: movq %rbx, (%rdi)
-; AVX512-NEXT: movq %r11, 8(%rdi)
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: addq $72, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: complement_ne_i512:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: andl $60, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setb %al
+; X64-NEXT: btcl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 511
%ofs = zext nneg i32 %rem to i512
%bit = shl nuw i512 1, %ofs
@@ -2182,606 +887,33 @@ define i1 @complement_ne_i512(ptr %word, i32 %position) nounwind {
define i1 @reset_eq_i512(ptr %word, i32 %position) nounwind {
; X86-LABEL: reset_eq_i512:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $288, %esp # imm = 0x120
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrl $3, %eax
-; X86-NEXT: andl $60, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: leal {{[0-9]+}}(%esp), %edi
-; X86-NEXT: subl %eax, %edi
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 4(%edi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%edi), %eax
-; X86-NEXT: andl $31, %ecx
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: shldl %cl, %edx, %ebx
-; X86-NEXT: movl 12(%edi), %edx
-; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%edi), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%edi), %edx
-; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%edi), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%edi), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%edi), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%edi), %esi
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%edi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %esi, %edx
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %edx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %ebx
-; X86-NEXT: orl %edx, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%edi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl 52(%edi), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 56(%edi), %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: shldl %cl, %esi, %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%ebp), %esi -; X86-NEXT: movl 56(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %eax, %ebx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 24(%esi), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ecx, %eax -; X86-NEXT: orl %ebx, %eax -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %esi, %ebx -; X86-NEXT: movl 44(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %eax, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 12(%esi), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ecx, %eax -; X86-NEXT: orl %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 60(%edi), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: shldl %cl, %esi, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 60(%ebx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 28(%ebx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl (%edi), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: negl %eax -; X86-NEXT: movl 256(%esp,%eax), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: shldl %cl, %edi, %eax -; X86-NEXT: movl %esi, %edi -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: shll %cl, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %ebx, %esi -; X86-NEXT: movl 32(%ebx), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ecx, %edx -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl (%ebx), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ecx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 16(%esi), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ecx, %ebx -; X86-NEXT: movl %eax, 
{{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 48(%esi), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ecx, %eax -; X86-NEXT: orl %ebx, %eax -; X86-NEXT: orl %edi, %eax -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: shldl %cl, %edx, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 36(%esi), %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ebx, %edx -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 4(%esi), %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ebx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: shldl %cl, %edi, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 20(%esi), %edi -; X86-NEXT: andl %edi, %ecx -; X86-NEXT: movl %ecx, %esi -; X86-NEXT: movl %edx, %ecx -; X86-NEXT: movl 8(%ebp), %ebx -; X86-NEXT: movl 52(%ebx), %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ebx, %edx -; X86-NEXT: orl %esi, %edx -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: notl %ebx -; X86-NEXT: andl %edi, %ebx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: 
movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: notl %esi -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: notl %edi -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: notl %edi -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: notl %edi -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload -; X86-NEXT: notl %ecx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload -; X86-NEXT: orl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl 8(%ebp), %eax -; X86-NEXT: movl %edx, 60(%eax) -; X86-NEXT: movl %esi, 56(%eax) -; X86-NEXT: movl %ecx, 52(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 44(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 40(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 36(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 32(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 28(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 24(%eax) -; X86-NEXT: movl %ebx, 20(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 16(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 12(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 8(%eax) -; X86-NEXT: movl %edi, 4(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, (%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, 48(%eax) -; X86-NEXT: sete %al -; X86-NEXT: leal -12(%ebp), %esp +; X86-NEXT: shrl $3, %esi +; X86-NEXT: andl $60, %esi +; X86-NEXT: movl (%ecx,%esi), %edi +; X86-NEXT: btl %edx, %edi +; X86-NEXT: setae %al +; X86-NEXT: btrl %edx, %edi +; X86-NEXT: movl %edi, (%ecx,%esi) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi -; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl ; -; SSE-LABEL: reset_eq_i512: -; SSE: # %bb.0: -; SSE-NEXT: pushq %rbp -; SSE-NEXT: pushq %r15 -; SSE-NEXT: pushq %r14 -; SSE-NEXT: pushq %r13 -; SSE-NEXT: pushq %r12 -; SSE-NEXT: pushq %rbx -; SSE-NEXT: subq $56, %rsp -; SSE-NEXT: xorps %xmm0, %xmm0 -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups 
%xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movl %esi, %ecx -; SSE-NEXT: andl $63, %ecx -; SSE-NEXT: shrl $3, %esi -; SSE-NEXT: andl $56, %esi -; SSE-NEXT: negl %esi -; SSE-NEXT: movslq %esi, %rdx -; SSE-NEXT: movq (%rsp,%rdx), %r9 -; SSE-NEXT: movq 8(%rsp,%rdx), %r8 -; SSE-NEXT: movq %r8, %rsi -; SSE-NEXT: shldq %cl, %r9, %rsi -; SSE-NEXT: movq -8(%rsp,%rdx), %rax -; SSE-NEXT: shldq %cl, %rax, %r9 -; SSE-NEXT: movq 16(%rsp,%rdx), %r14 -; SSE-NEXT: movq 24(%rsp,%rdx), %r10 -; SSE-NEXT: movq %r10, %rbx -; SSE-NEXT: shldq %cl, %r14, %rbx -; SSE-NEXT: shldq %cl, %r8, %r14 -; SSE-NEXT: movq 32(%rsp,%rdx), %r13 -; SSE-NEXT: movq 40(%rsp,%rdx), %r12 -; SSE-NEXT: shldq %cl, %r13, %r12 -; SSE-NEXT: shldq %cl, %r10, %r13 -; SSE-NEXT: movq -16(%rsp,%rdx), %rdx -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: # kill: def $cl killed $cl killed $ecx -; SSE-NEXT: shlq %cl, %rdx -; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq %r12, %rbp -; SSE-NEXT: movq %r9, %r15 -; SSE-NEXT: movq %rsi, %r11 -; SSE-NEXT: movq 16(%rdi), %r8 -; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 48(%rdi), %rcx -; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rcx, %r13 -; SSE-NEXT: andq %r8, %r9 -; SSE-NEXT: orq %r13, %r9 -; SSE-NEXT: movq 56(%rdi), %rcx -; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rcx, %r12 -; SSE-NEXT: movq 24(%rdi), %r10 -; SSE-NEXT: andq %r10, %rsi -; SSE-NEXT: orq %r12, %rsi -; SSE-NEXT: movq %r14, %r13 -; SSE-NEXT: movq 32(%rdi), %rcx -; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rcx, %r14 -; SSE-NEXT: movq %rdx, %r12 -; SSE-NEXT: movq (%rdi), %rcx -; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rcx, %rdx -; SSE-NEXT: orq %r14, %rdx -; SSE-NEXT: orq %r9, %rdx -; SSE-NEXT: movq %rbx, %r14 -; SSE-NEXT: movq 40(%rdi), %rcx -; SSE-NEXT: andq %rcx, %rbx -; SSE-NEXT: movq %rax, %r9 -; SSE-NEXT: movq 8(%rdi), %r8 -; SSE-NEXT: andq %r8, %rax -; SSE-NEXT: orq %rbx, %rax -; SSE-NEXT: orq %rsi, %rax -; SSE-NEXT: notq %r11 -; SSE-NEXT: andq %r10, %r11 -; SSE-NEXT: notq %r15 -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload -; SSE-NEXT: notq %r14 -; SSE-NEXT: andq %rcx, %r14 -; SSE-NEXT: notq %r13 -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload -; SSE-NEXT: notq %rbp -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload -; SSE-NEXT: notq %rcx -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload -; SSE-NEXT: notq %r9 -; SSE-NEXT: andq %r8, %r9 -; SSE-NEXT: notq %r12 -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload -; SSE-NEXT: orq %rdx, %rax -; SSE-NEXT: movq %rcx, 48(%rdi) -; SSE-NEXT: movq %rbp, 56(%rdi) -; SSE-NEXT: movq %r13, 32(%rdi) -; SSE-NEXT: movq %r14, 40(%rdi) -; SSE-NEXT: movq %r15, 16(%rdi) -; SSE-NEXT: movq %r11, 24(%rdi) -; SSE-NEXT: movq %r12, (%rdi) -; SSE-NEXT: movq %r9, 8(%rdi) -; SSE-NEXT: sete %al -; SSE-NEXT: addq $56, %rsp -; SSE-NEXT: popq %rbx -; SSE-NEXT: popq %r12 -; SSE-NEXT: popq %r13 -; SSE-NEXT: popq %r14 -; SSE-NEXT: popq 
%r15 -; SSE-NEXT: popq %rbp -; SSE-NEXT: retq -; -; AVX2-LABEL: reset_eq_i512: -; AVX2: # %bb.0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: pushq %r15 -; AVX2-NEXT: pushq %r14 -; AVX2-NEXT: pushq %r13 -; AVX2-NEXT: pushq %r12 -; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: pushq %rax -; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0] -; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movl %esi, %ecx -; AVX2-NEXT: andl $63, %ecx -; AVX2-NEXT: shrl $3, %esi -; AVX2-NEXT: andl $56, %esi -; AVX2-NEXT: negl %esi -; AVX2-NEXT: movslq %esi, %rdx -; AVX2-NEXT: movq -48(%rsp,%rdx), %r8 -; AVX2-NEXT: movq -40(%rsp,%rdx), %rbx -; AVX2-NEXT: movq %rbx, %rax -; AVX2-NEXT: shldq %cl, %r8, %rax -; AVX2-NEXT: movq -16(%rsp,%rdx), %r10 -; AVX2-NEXT: movq -8(%rsp,%rdx), %rsi -; AVX2-NEXT: shldq %cl, %r10, %rsi -; AVX2-NEXT: movq -32(%rsp,%rdx), %r11 -; AVX2-NEXT: movq -24(%rsp,%rdx), %r14 -; AVX2-NEXT: movq %r14, %r9 -; AVX2-NEXT: shldq %cl, %r11, %r9 -; AVX2-NEXT: movq -64(%rsp,%rdx), %r15 -; AVX2-NEXT: movq -56(%rsp,%rdx), %rdx -; AVX2-NEXT: shldq %cl, %rdx, %r8 -; AVX2-NEXT: shldq %cl, %r14, %r10 -; AVX2-NEXT: shldq %cl, %rbx, %r11 -; AVX2-NEXT: shldq %cl, %r15, %rdx -; AVX2-NEXT: shlxq %rcx, %r15, %rcx -; AVX2-NEXT: movq 24(%rdi), %rbx -; AVX2-NEXT: movq 56(%rdi), %r14 -; AVX2-NEXT: movq 16(%rdi), %r15 -; AVX2-NEXT: movq 48(%rdi), %r13 -; AVX2-NEXT: movq 32(%rdi), %rbp -; AVX2-NEXT: andnq %rbp, %r11, %r12 -; AVX2-NEXT: andq %r11, %rbp -; AVX2-NEXT: andnq %r13, %r10, %r11 -; AVX2-NEXT: andq %r10, %r13 -; AVX2-NEXT: andnq %r15, %r8, %r10 -; AVX2-NEXT: andq %r8, %r15 -; AVX2-NEXT: movq 40(%rdi), %r8 -; AVX2-NEXT: orq %r13, %r15 -; AVX2-NEXT: andnq %r8, %r9, %r13 -; AVX2-NEXT: andq %r9, %r8 -; AVX2-NEXT: andnq %r14, %rsi, %r9 -; AVX2-NEXT: andq %rsi, %r14 -; AVX2-NEXT: andnq %rbx, %rax, %rsi -; AVX2-NEXT: andq %rax, %rbx -; AVX2-NEXT: movq (%rdi), %rax -; AVX2-NEXT: orq %r14, %rbx -; AVX2-NEXT: andnq %rax, %rcx, %r14 -; AVX2-NEXT: andq %rcx, %rax -; AVX2-NEXT: orq %rbp, %rax -; AVX2-NEXT: movq 8(%rdi), %rcx -; AVX2-NEXT: orq %r15, %rax -; AVX2-NEXT: andnq %rcx, %rdx, %r15 -; AVX2-NEXT: andq %rdx, %rcx -; AVX2-NEXT: orq %r8, %rcx -; AVX2-NEXT: orq %rbx, %rcx -; AVX2-NEXT: orq %rax, %rcx -; AVX2-NEXT: movq %r11, 48(%rdi) -; AVX2-NEXT: movq %r9, 56(%rdi) -; AVX2-NEXT: movq %r12, 32(%rdi) -; AVX2-NEXT: movq %r13, 40(%rdi) -; AVX2-NEXT: movq %r10, 16(%rdi) -; AVX2-NEXT: movq %rsi, 24(%rdi) -; AVX2-NEXT: movq %r14, (%rdi) -; AVX2-NEXT: movq %r15, 8(%rdi) -; AVX2-NEXT: sete %al -; AVX2-NEXT: addq $8, %rsp -; AVX2-NEXT: popq %rbx -; AVX2-NEXT: popq %r12 -; AVX2-NEXT: popq %r13 -; AVX2-NEXT: popq %r14 -; AVX2-NEXT: popq %r15 -; AVX2-NEXT: popq %rbp -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512-LABEL: reset_eq_i512: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %rbp -; AVX512-NEXT: pushq %r15 -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %r13 -; AVX512-NEXT: pushq %r12 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: pushq %rax -; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0] -; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX512-NEXT: movl %esi, %ecx -; AVX512-NEXT: andl $63, %ecx -; AVX512-NEXT: shrl $3, %esi -; AVX512-NEXT: andl $56, %esi -; 
AVX512-NEXT: negl %esi -; AVX512-NEXT: movslq %esi, %rbx -; AVX512-NEXT: movq -48(%rsp,%rbx), %r8 -; AVX512-NEXT: movq -40(%rsp,%rbx), %r14 -; AVX512-NEXT: movq %r14, %rax -; AVX512-NEXT: shldq %cl, %r8, %rax -; AVX512-NEXT: movq -16(%rsp,%rbx), %r10 -; AVX512-NEXT: movq -8(%rsp,%rbx), %rsi -; AVX512-NEXT: shldq %cl, %r10, %rsi -; AVX512-NEXT: movq -32(%rsp,%rbx), %r11 -; AVX512-NEXT: movq -24(%rsp,%rbx), %r15 -; AVX512-NEXT: movq %r15, %r9 -; AVX512-NEXT: shldq %cl, %r11, %r9 -; AVX512-NEXT: movq -56(%rsp,%rbx), %rdx -; AVX512-NEXT: shldq %cl, %rdx, %r8 -; AVX512-NEXT: shldq %cl, %r15, %r10 -; AVX512-NEXT: shldq %cl, %r14, %r11 -; AVX512-NEXT: movq -64(%rsp,%rbx), %rbx -; AVX512-NEXT: shldq %cl, %rbx, %rdx -; AVX512-NEXT: shlxq %rcx, %rbx, %rcx -; AVX512-NEXT: movq 24(%rdi), %rbx -; AVX512-NEXT: movq 56(%rdi), %r14 -; AVX512-NEXT: movq 16(%rdi), %r15 -; AVX512-NEXT: movq 48(%rdi), %r13 -; AVX512-NEXT: movq 32(%rdi), %rbp -; AVX512-NEXT: andnq %rbp, %r11, %r12 -; AVX512-NEXT: andq %r11, %rbp -; AVX512-NEXT: andnq %r13, %r10, %r11 -; AVX512-NEXT: andq %r10, %r13 -; AVX512-NEXT: andnq %r15, %r8, %r10 -; AVX512-NEXT: andq %r8, %r15 -; AVX512-NEXT: movq 40(%rdi), %r8 -; AVX512-NEXT: orq %r13, %r15 -; AVX512-NEXT: andnq %r8, %r9, %r13 -; AVX512-NEXT: andq %r9, %r8 -; AVX512-NEXT: andnq %r14, %rsi, %r9 -; AVX512-NEXT: andq %rsi, %r14 -; AVX512-NEXT: andnq %rbx, %rax, %rsi -; AVX512-NEXT: andq %rax, %rbx -; AVX512-NEXT: movq (%rdi), %rax -; AVX512-NEXT: orq %r14, %rbx -; AVX512-NEXT: andnq %rax, %rcx, %r14 -; AVX512-NEXT: andq %rcx, %rax -; AVX512-NEXT: orq %rbp, %rax -; AVX512-NEXT: movq 8(%rdi), %rcx -; AVX512-NEXT: orq %r15, %rax -; AVX512-NEXT: andnq %rcx, %rdx, %r15 -; AVX512-NEXT: andq %rdx, %rcx -; AVX512-NEXT: orq %r8, %rcx -; AVX512-NEXT: orq %rbx, %rcx -; AVX512-NEXT: orq %rax, %rcx -; AVX512-NEXT: movq %r11, 48(%rdi) -; AVX512-NEXT: movq %r9, 56(%rdi) -; AVX512-NEXT: movq %r12, 32(%rdi) -; AVX512-NEXT: movq %r13, 40(%rdi) -; AVX512-NEXT: movq %r10, 16(%rdi) -; AVX512-NEXT: movq %rsi, 24(%rdi) -; AVX512-NEXT: movq %r14, (%rdi) -; AVX512-NEXT: movq %r15, 8(%rdi) -; AVX512-NEXT: sete %al -; AVX512-NEXT: addq $8, %rsp -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r12 -; AVX512-NEXT: popq %r13 -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: popq %r15 -; AVX512-NEXT: popq %rbp -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; X64-LABEL: reset_eq_i512: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: shrl $3, %ecx +; X64-NEXT: andl $60, %ecx +; X64-NEXT: movl (%rdi,%rcx), %edx +; X64-NEXT: btl %esi, %edx +; X64-NEXT: setae %al +; X64-NEXT: btrl %esi, %edx +; X64-NEXT: movl %edx, (%rdi,%rcx) +; X64-NEXT: retq %rem = and i32 %position, 511 %ofs = zext nneg i32 %rem to i512 %bit = shl nuw i512 1, %ofs @@ -2797,572 +929,33 @@ define i1 @reset_eq_i512(ptr %word, i32 %position) nounwind { define i1 @set_ne_i512(ptr %word, i32 %position) nounwind { ; X86-LABEL: set_ne_i512: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: movl %esp, %ebp -; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: andl $-16, %esp -; X86-NEXT: subl $272, %esp # imm = 0x110 -; X86-NEXT: movl 12(%ebp), %ecx -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: shrl $3, %eax -; X86-NEXT: andl $60, %eax -; X86-NEXT: movl %eax, (%esp) # 4-byte Spill -; X86-NEXT: leal {{[0-9]+}}(%esp), %edx -; X86-NEXT: subl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; 
X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $1, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl 24(%edx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 28(%edx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl $31, %ecx -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 56(%edx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 60(%edx), %esi -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%edx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 12(%edx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 40(%edx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 44(%edx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 16(%edx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 20(%edx), %ebx -; X86-NEXT: movl %ebx, %esi -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 32(%edx), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 36(%edx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edi, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl 52(%edx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: shldl %cl, %esi, %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 4(%edx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: 
shldl %cl, %eax, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%ebp), %edx -; X86-NEXT: movl 40(%edx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %esi, %eax -; X86-NEXT: movl 8(%edx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ebx, %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: movl %edx, %eax -; X86-NEXT: movl 56(%edx), %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edi, %ebx -; X86-NEXT: movl 24(%edx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: orl %esi, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 44(%eax), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload -; X86-NEXT: movl 12(%eax), %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload -; X86-NEXT: orl %esi, %ebx -; X86-NEXT: movl 60(%eax), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %eax, %esi -; X86-NEXT: movl %edx, %eax -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl 28(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl (%eax), %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl (%esp), %eax # 4-byte Reload -; X86-NEXT: negl %eax -; X86-NEXT: movl 240(%esp,%eax), %esi -; X86-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, %esi -; X86-NEXT: movl %esi, (%esp) # 4-byte Spill -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: shll %cl, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%ebp), %esi -; X86-NEXT: movl 32(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edi, %eax -; X86-NEXT: movl (%esi), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl 16(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ebx, %eax -; X86-NEXT: movl 48(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl (%esp), %edx # 4-byte Folded Reload -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; 
X86-NEXT: movl 36(%esi), %ebx -; X86-NEXT: movl %ebx, %eax -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl 4(%esi), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl %esi, %eax -; X86-NEXT: movl 20(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl %esi, %edi -; X86-NEXT: movl 52(%eax), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: orl %edi, %eax -; X86-NEXT: orl %ecx, %eax -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: orl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, (%esp) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload -; X86-NEXT: orl %edx, %eax -; X86-NEXT: movl 8(%ebp), %edx -; X86-NEXT: movl %ebx, 60(%edx) -; X86-NEXT: movl %edi, 56(%edx) -; X86-NEXT: movl %ecx, 52(%edx) -; X86-NEXT: movl %esi, 44(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 40(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 36(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 32(%edx) -; X86-NEXT: 
movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 28(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 24(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 20(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 16(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 12(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 8(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 4(%edx) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, (%edx) -; X86-NEXT: movl (%esp), %eax # 4-byte Reload -; X86-NEXT: movl %eax, 48(%edx) -; X86-NEXT: setne %al -; X86-NEXT: leal -12(%ebp), %esp +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl %edx, %esi +; X86-NEXT: shrl $3, %esi +; X86-NEXT: andl $60, %esi +; X86-NEXT: movl (%ecx,%esi), %edi +; X86-NEXT: btl %edx, %edi +; X86-NEXT: setb %al +; X86-NEXT: btsl %edx, %edi +; X86-NEXT: movl %edi, (%ecx,%esi) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi -; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl ; -; SSE-LABEL: set_ne_i512: -; SSE: # %bb.0: -; SSE-NEXT: pushq %rbp -; SSE-NEXT: pushq %r15 -; SSE-NEXT: pushq %r14 -; SSE-NEXT: pushq %r13 -; SSE-NEXT: pushq %r12 -; SSE-NEXT: pushq %rbx -; SSE-NEXT: subq $56, %rsp -; SSE-NEXT: xorps %xmm0, %xmm0 -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movl %esi, %ecx -; SSE-NEXT: andl $63, %ecx -; SSE-NEXT: shrl $3, %esi -; SSE-NEXT: andl $56, %esi -; SSE-NEXT: negl %esi -; SSE-NEXT: movslq %esi, %rbx -; SSE-NEXT: movq (%rsp,%rbx), %rsi -; SSE-NEXT: movq 8(%rsp,%rbx), %r14 -; SSE-NEXT: movq %r14, %rax -; SSE-NEXT: shldq %cl, %rsi, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 32(%rsp,%rbx), %r8 -; SSE-NEXT: movq 40(%rsp,%rbx), %rbp -; SSE-NEXT: shldq %cl, %r8, %rbp -; SSE-NEXT: movq 16(%rsp,%rbx), %r9 -; SSE-NEXT: movq 24(%rsp,%rbx), %r15 -; SSE-NEXT: movq %r15, %r10 -; SSE-NEXT: shldq %cl, %r9, %r10 -; SSE-NEXT: movq -8(%rsp,%rbx), %r11 -; SSE-NEXT: shldq %cl, %r11, %rsi -; SSE-NEXT: shldq %cl, %r15, %r8 -; SSE-NEXT: shldq %cl, %r14, %r9 -; SSE-NEXT: movq -16(%rsp,%rbx), %rbx -; SSE-NEXT: shldq %cl, %rbx, %r11 -; SSE-NEXT: # kill: def $cl killed $cl killed $ecx -; SSE-NEXT: shlq %cl, %rbx -; SSE-NEXT: movq 24(%rdi), %r15 -; SSE-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 56(%rdi), %rcx -; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 16(%rdi), %r12 -; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 48(%rdi), %r13 -; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %r8, %r13 -; SSE-NEXT: andq %rsi, %r12 -; SSE-NEXT: orq %r13, %r12 -; SSE-NEXT: movq %rcx, %r13 -; SSE-NEXT: andq %rbp, %r13 -; SSE-NEXT: andq %rax, %r15 -; SSE-NEXT: orq %r13, %r15 -; SSE-NEXT: movq 32(%rdi), %r14 -; SSE-NEXT: movq %r14, %rcx -; SSE-NEXT: andq %r9, 
%rcx -; SSE-NEXT: movq (%rdi), %r13 -; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rbx, %r13 -; SSE-NEXT: orq %rcx, %r13 -; SSE-NEXT: orq %r12, %r13 -; SSE-NEXT: movq 40(%rdi), %rcx -; SSE-NEXT: movq %rcx, %r12 -; SSE-NEXT: andq %r10, %r12 -; SSE-NEXT: movq 8(%rdi), %rdx -; SSE-NEXT: movq %rdx, %rax -; SSE-NEXT: andq %r11, %rax -; SSE-NEXT: orq %r12, %rax -; SSE-NEXT: orq %r15, %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload -; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload -; SSE-NEXT: orq %rcx, %r10 -; SSE-NEXT: orq %r14, %r9 -; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload -; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload -; SSE-NEXT: orq %rdx, %r11 -; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload -; SSE-NEXT: orq %r13, %rax -; SSE-NEXT: movq %r8, 48(%rdi) -; SSE-NEXT: movq %rbp, 56(%rdi) -; SSE-NEXT: movq %r9, 32(%rdi) -; SSE-NEXT: movq %r10, 40(%rdi) -; SSE-NEXT: movq %rsi, 16(%rdi) -; SSE-NEXT: movq %r15, 24(%rdi) -; SSE-NEXT: movq %rbx, (%rdi) -; SSE-NEXT: movq %r11, 8(%rdi) -; SSE-NEXT: setne %al -; SSE-NEXT: addq $56, %rsp -; SSE-NEXT: popq %rbx -; SSE-NEXT: popq %r12 -; SSE-NEXT: popq %r13 -; SSE-NEXT: popq %r14 -; SSE-NEXT: popq %r15 -; SSE-NEXT: popq %rbp -; SSE-NEXT: retq -; -; AVX2-LABEL: set_ne_i512: -; AVX2: # %bb.0: -; AVX2-NEXT: pushq %rbp -; AVX2-NEXT: pushq %r15 -; AVX2-NEXT: pushq %r14 -; AVX2-NEXT: pushq %r13 -; AVX2-NEXT: pushq %r12 -; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: subq $72, %rsp -; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0] -; AVX2-NEXT: vmovups %ymm0, (%rsp) -; AVX2-NEXT: movl %esi, %ecx -; AVX2-NEXT: andl $63, %ecx -; AVX2-NEXT: shrl $3, %esi -; AVX2-NEXT: andl $56, %esi -; AVX2-NEXT: negl %esi -; AVX2-NEXT: movslq %esi, %rbx -; AVX2-NEXT: movq 16(%rsp,%rbx), %rsi -; AVX2-NEXT: movq 24(%rsp,%rbx), %rbp -; AVX2-NEXT: movq %rbp, %rax -; AVX2-NEXT: shldq %cl, %rsi, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 48(%rsp,%rbx), %r8 -; AVX2-NEXT: movq 56(%rsp,%rbx), %r13 -; AVX2-NEXT: shldq %cl, %r8, %r13 -; AVX2-NEXT: movq 32(%rsp,%rbx), %r9 -; AVX2-NEXT: movq 40(%rsp,%rbx), %r14 -; AVX2-NEXT: movq %r14, %r10 -; AVX2-NEXT: shldq %cl, %r9, %r10 -; AVX2-NEXT: movq 8(%rsp,%rbx), %r11 -; AVX2-NEXT: shldq %cl, %r11, %rsi -; AVX2-NEXT: shldq %cl, %r14, %r8 -; AVX2-NEXT: movq 16(%rdi), %r12 -; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 48(%rdi), %r14 -; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq %r8, %r14 -; AVX2-NEXT: andq %rsi, %r12 -; AVX2-NEXT: orq %r14, %r12 -; AVX2-NEXT: movq 56(%rdi), %r15 -; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq %r13, %r15 -; AVX2-NEXT: movq 24(%rdi), %r14 -; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq %rax, %r14 -; AVX2-NEXT: orq %r15, %r14 -; AVX2-NEXT: shldq %cl, %rbp, %r9 -; AVX2-NEXT: movq (%rsp,%rbx), %rdx -; AVX2-NEXT: movq 32(%rdi), %r15 -; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq %r9, %r15 -; AVX2-NEXT: shlxq %rcx, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 
(%rdi), %rbx -; AVX2-NEXT: movq %rbx, %rbp -; AVX2-NEXT: andq %rax, %rbp -; AVX2-NEXT: orq %r15, %rbp -; AVX2-NEXT: orq %r12, %rbp -; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx -; AVX2-NEXT: shldq %cl, %rdx, %r11 -; AVX2-NEXT: movq 40(%rdi), %rax -; AVX2-NEXT: movq %rax, %rcx -; AVX2-NEXT: andq %r10, %rcx -; AVX2-NEXT: movq 8(%rdi), %r15 -; AVX2-NEXT: movq %r15, %r12 -; AVX2-NEXT: andq %r11, %r12 -; AVX2-NEXT: orq %rcx, %r12 -; AVX2-NEXT: orq %r14, %r12 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload -; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload -; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload -; AVX2-NEXT: orq %rax, %r10 -; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload -; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload -; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload -; AVX2-NEXT: orq %r15, %r11 -; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload -; AVX2-NEXT: orq %rbp, %r12 -; AVX2-NEXT: movq %r8, 48(%rdi) -; AVX2-NEXT: movq %r13, 56(%rdi) -; AVX2-NEXT: movq %r9, 32(%rdi) -; AVX2-NEXT: movq %r10, 40(%rdi) -; AVX2-NEXT: movq %rsi, 16(%rdi) -; AVX2-NEXT: movq %rcx, 24(%rdi) -; AVX2-NEXT: movq %rbx, (%rdi) -; AVX2-NEXT: movq %r11, 8(%rdi) -; AVX2-NEXT: setne %al -; AVX2-NEXT: addq $72, %rsp -; AVX2-NEXT: popq %rbx -; AVX2-NEXT: popq %r12 -; AVX2-NEXT: popq %r13 -; AVX2-NEXT: popq %r14 -; AVX2-NEXT: popq %r15 -; AVX2-NEXT: popq %rbp -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512-LABEL: set_ne_i512: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %rbp -; AVX512-NEXT: pushq %r15 -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %r13 -; AVX512-NEXT: pushq %r12 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: subq $72, %rsp -; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0] -; AVX512-NEXT: vmovups %ymm0, (%rsp) -; AVX512-NEXT: movl %esi, %ecx -; AVX512-NEXT: andl $63, %ecx -; AVX512-NEXT: shrl $3, %esi -; AVX512-NEXT: andl $56, %esi -; AVX512-NEXT: negl %esi -; AVX512-NEXT: movslq %esi, %rbx -; AVX512-NEXT: movq 16(%rsp,%rbx), %rsi -; AVX512-NEXT: movq 24(%rsp,%rbx), %rbp -; AVX512-NEXT: movq %rbp, %rax -; AVX512-NEXT: shldq %cl, %rsi, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 48(%rsp,%rbx), %r8 -; AVX512-NEXT: movq 56(%rsp,%rbx), %r13 -; AVX512-NEXT: shldq %cl, %r8, %r13 -; AVX512-NEXT: movq 32(%rsp,%rbx), %r9 -; AVX512-NEXT: movq 40(%rsp,%rbx), %r14 -; AVX512-NEXT: movq %r14, %r10 -; AVX512-NEXT: shldq %cl, %r9, %r10 -; AVX512-NEXT: movq 8(%rsp,%rbx), %r11 -; AVX512-NEXT: shldq %cl, %r11, %rsi -; AVX512-NEXT: shldq %cl, %r14, %r8 -; AVX512-NEXT: movq 16(%rdi), %r12 -; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 48(%rdi), %r14 -; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq %r8, %r14 -; AVX512-NEXT: andq %rsi, %r12 -; AVX512-NEXT: orq %r14, %r12 -; AVX512-NEXT: movq 56(%rdi), %r15 -; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq %r13, %r15 -; AVX512-NEXT: movq 24(%rdi), %r14 -; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq %rax, %r14 -; AVX512-NEXT: orq %r15, %r14 -; AVX512-NEXT: shldq %cl, %rbp, %r9 -; AVX512-NEXT: movq (%rsp,%rbx), %rdx -; 
AVX512-NEXT: movq 32(%rdi), %r15 -; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq %r9, %r15 -; AVX512-NEXT: shlxq %rcx, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq (%rdi), %rbx -; AVX512-NEXT: movq %rbx, %rbp -; AVX512-NEXT: andq %rax, %rbp -; AVX512-NEXT: orq %r15, %rbp -; AVX512-NEXT: orq %r12, %rbp -; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx -; AVX512-NEXT: shldq %cl, %rdx, %r11 -; AVX512-NEXT: movq 40(%rdi), %rax -; AVX512-NEXT: movq %rax, %rcx -; AVX512-NEXT: andq %r10, %rcx -; AVX512-NEXT: movq 8(%rdi), %r15 -; AVX512-NEXT: movq %r15, %r12 -; AVX512-NEXT: andq %r11, %r12 -; AVX512-NEXT: orq %rcx, %r12 -; AVX512-NEXT: orq %r14, %r12 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload -; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload -; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload -; AVX512-NEXT: orq %rax, %r10 -; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload -; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload -; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload -; AVX512-NEXT: orq %r15, %r11 -; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload -; AVX512-NEXT: orq %rbp, %r12 -; AVX512-NEXT: movq %r8, 48(%rdi) -; AVX512-NEXT: movq %r13, 56(%rdi) -; AVX512-NEXT: movq %r9, 32(%rdi) -; AVX512-NEXT: movq %r10, 40(%rdi) -; AVX512-NEXT: movq %rsi, 16(%rdi) -; AVX512-NEXT: movq %rcx, 24(%rdi) -; AVX512-NEXT: movq %rbx, (%rdi) -; AVX512-NEXT: movq %r11, 8(%rdi) -; AVX512-NEXT: setne %al -; AVX512-NEXT: addq $72, %rsp -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r12 -; AVX512-NEXT: popq %r13 -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: popq %r15 -; AVX512-NEXT: popq %rbp -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; X64-LABEL: set_ne_i512: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: shrl $3, %ecx +; X64-NEXT: andl $60, %ecx +; X64-NEXT: movl (%rdi,%rcx), %edx +; X64-NEXT: btl %esi, %edx +; X64-NEXT: setb %al +; X64-NEXT: btsl %esi, %edx +; X64-NEXT: movl %edx, (%rdi,%rcx) +; X64-NEXT: retq %rem = and i32 %position, 511 %ofs = zext nneg i32 %rem to i512 %bit = shl nuw i512 1, %ofs @@ -3383,13 +976,14 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi ; X86-NEXT: andl $-16, %esp -; X86-NEXT: subl $432, %esp # imm = 0x1B0 +; X86-NEXT: subl $352, %esp # imm = 0x160 ; X86-NEXT: movl 12(%ebp), %ecx ; X86-NEXT: movl %ecx, %edx ; X86-NEXT: shrl $3, %edx ; X86-NEXT: andl $60, %edx -; X86-NEXT: leal {{[0-9]+}}(%esp), %esi -; X86-NEXT: subl %edx, %esi +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: leal {{[0-9]+}}(%esp), %eax +; X86-NEXT: subl %edx, %eax ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) @@ -3422,60 +1016,58 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl 56(%esi), %eax +; X86-NEXT: movl 56(%eax), %esi +; X86-NEXT: movl 60(%eax), %ebx +; X86-NEXT: movl 52(%eax), %edi +; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 48(%eax), %edi +; X86-NEXT: movl 44(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 40(%eax), 
%edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 36(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 32(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 28(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 24(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 20(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 16(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 12(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 8(%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl 4(%eax), %eax ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 60(%esi), %eax +; X86-NEXT: movzbl 16(%ebp), %eax +; X86-NEXT: movzbl %al, %eax +; X86-NEXT: movl %eax, {{[0-9]+}}(%esp) +; X86-NEXT: andl $31, %ecx +; X86-NEXT: shldl %cl, %esi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; X86-NEXT: shldl %cl, %eax, %esi +; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: shldl %cl, %edi, %eax ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 52(%esi), %eax -; X86-NEXT: movl 48(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 40(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 44(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 36(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 32(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 28(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 24(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 20(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 16(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 12(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%esi), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl (%esi), %edi +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: shldl %cl, %ebx, %edi ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 4(%esi), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movzbl 16(%ebp), %ebx -; X86-NEXT: movzbl %bl, %esi -; X86-NEXT: movl %esi, {{[0-9]+}}(%esp) -; X86-NEXT: leal {{[0-9]+}}(%esp), %esi -; X86-NEXT: subl %edx, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl $31, %ecx ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill +; X86-NEXT: shldl %cl, %edx, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload ; X86-NEXT: shldl %cl, %eax, 
%edx ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: shldl %cl, %ebx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: shldl %cl, %esi, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: shldl %cl, %edx, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: shldl %cl, %edx, %eax +; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload ; X86-NEXT: shldl %cl, %eax, %edx ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill @@ -3500,9 +1092,12 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload ; X86-NEXT: shldl %cl, %eax, %esi ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: movl %ebx, %edx -; X86-NEXT: shldl %cl, %edi, %edx +; X86-NEXT: shll %cl, %eax +; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: leal {{[0-9]+}}(%esp), %eax +; X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) @@ -3534,273 +1129,148 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) ; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl 8(%ebp), %ebx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 48(%ebx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %esi, %edx +; X86-NEXT: movl 56(%eax), %esi +; X86-NEXT: movl 60(%eax), %edi +; X86-NEXT: shldl %cl, %esi, %edi +; X86-NEXT: movl 8(%ebp), %edx +; X86-NEXT: andl 60(%edx), %ebx +; X86-NEXT: orl %edi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 52(%eax), %edi +; X86-NEXT: shldl %cl, %edi, %esi +; X86-NEXT: andl 56(%edx), %ebx +; X86-NEXT: orl %esi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 48(%eax), %esi +; X86-NEXT: shldl %cl, %esi, %edi +; X86-NEXT: andl 52(%edx), %ebx +; X86-NEXT: orl %edi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 44(%eax), %edi +; X86-NEXT: shldl %cl, %edi, %esi +; X86-NEXT: andl 48(%edx), %ebx +; X86-NEXT: orl %esi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 40(%eax), %esi +; X86-NEXT: shldl %cl, %esi, %edi +; X86-NEXT: andl 44(%edx), %ebx +; X86-NEXT: orl %edi, %ebx +; X86-NEXT: movl %ebx, 
{{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 36(%eax), %edi +; X86-NEXT: shldl %cl, %edi, %esi +; X86-NEXT: andl 40(%edx), %ebx +; X86-NEXT: orl %esi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 32(%eax), %esi +; X86-NEXT: shldl %cl, %esi, %edi +; X86-NEXT: andl 36(%edx), %ebx +; X86-NEXT: orl %edi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 28(%eax), %edi +; X86-NEXT: shldl %cl, %edi, %esi +; X86-NEXT: andl 32(%edx), %ebx +; X86-NEXT: orl %esi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 24(%eax), %esi +; X86-NEXT: shldl %cl, %esi, %edi +; X86-NEXT: andl 28(%edx), %ebx +; X86-NEXT: orl %edi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 20(%eax), %edi +; X86-NEXT: shldl %cl, %edi, %esi +; X86-NEXT: andl 24(%edx), %ebx +; X86-NEXT: orl %esi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 16(%eax), %esi +; X86-NEXT: shldl %cl, %esi, %edi +; X86-NEXT: andl 20(%edx), %ebx +; X86-NEXT: orl %edi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 12(%eax), %edi +; X86-NEXT: shldl %cl, %edi, %esi +; X86-NEXT: andl 16(%edx), %ebx +; X86-NEXT: orl %esi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 8(%eax), %esi +; X86-NEXT: shldl %cl, %esi, %edi +; X86-NEXT: andl 12(%edx), %ebx +; X86-NEXT: orl %edi, %ebx +; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-NEXT: notl %ebx +; X86-NEXT: movl 4(%eax), %edi +; X86-NEXT: shldl %cl, %edi, %esi +; X86-NEXT: andl 8(%edx), %ebx +; X86-NEXT: orl %esi, %ebx ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 16(%ebx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %eax, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 40(%ebx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %eax, %edx -; X86-NEXT: movl %edx, %esi +; X86-NEXT: notl %esi +; X86-NEXT: movl (%eax), %eax +; X86-NEXT: shldl %cl, %eax, %edi +; X86-NEXT: andl 4(%edx), %esi +; X86-NEXT: orl %edi, %esi +; X86-NEXT: movl %esi, %edi +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload +; X86-NEXT: notl %esi +; X86-NEXT: shll %cl, %eax +; X86-NEXT: andl (%edx), %esi +; X86-NEXT: orl %eax, %esi ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; 
X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%ebx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %eax -; X86-NEXT: orl %esi, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 56(%ebx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %esi, %edx -; X86-NEXT: movl %edx, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 24(%ebx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %esi, %edx -; X86-NEXT: orl %edi, %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl (%edx,%eax), %eax +; X86-NEXT: btl %ecx, %eax ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 52(%ebx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %eax -; X86-NEXT: movl %eax, %edx +; X86-NEXT: movl %eax, 60(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 20(%ebx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %esi, %eax -; X86-NEXT: orl %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl %eax, 56(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 44(%ebx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 12(%ebx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %esi, %edi -; X86-NEXT: orl %eax, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 60(%ebx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 28(%ebx), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %eax, %edx -; X86-NEXT: orl %esi, %edx -; X86-NEXT: orl %edi, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload +; X86-NEXT: movl %eax, 52(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: shll %cl, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 32(%ebx), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %esi, %ecx -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl (%ebx), %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill 
-; X86-NEXT: andl %edi, %eax -; X86-NEXT: orl %ecx, %eax -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl %eax, 48(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 36(%ebx), %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %ecx, %eax -; X86-NEXT: movl %eax, %ecx -; X86-NEXT: movl %edx, %eax -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 4(%ebx), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl %edx, %eax -; X86-NEXT: orl %ecx, %eax -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl 56(%edi), %ebx -; X86-NEXT: movl 60(%edi), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: shldl %cl, %ebx, %eax -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 52(%edi), %eax -; X86-NEXT: shldl %cl, %eax, %ebx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 48(%edi), %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %esi, %eax -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl %eax, 44(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: notl %eax -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl %eax, %edx -; X86-NEXT: movl 40(%edi), %ebx -; X86-NEXT: movl 44(%edi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %ebx, %eax -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 36(%edi), %eax -; X86-NEXT: shldl %cl, %eax, %ebx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 32(%edi), %ebx -; X86-NEXT: shldl %cl, %ebx, %eax -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 28(%edi), %eax -; X86-NEXT: shldl %cl, 
%eax, %ebx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 24(%edi), %ebx -; X86-NEXT: shldl %cl, %ebx, %eax -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 20(%edi), %eax -; X86-NEXT: shldl %cl, %eax, %ebx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 16(%edi), %ebx -; X86-NEXT: shldl %cl, %ebx, %eax -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl 12(%edi), %eax -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: shldl %cl, %eax, %ebx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: notl %esi -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload -; X86-NEXT: movl 8(%edi), %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: orl %eax, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-NEXT: movl %eax, 40(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: notl %eax -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl 4(%edi), %ebx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: shldl %cl, %ebx, %edx -; X86-NEXT: orl %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: # kill: def $cl killed $cl killed $ecx +; X86-NEXT: movl %eax, 36(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: shldl %cl, %esi, %eax -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl %edx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: notl %edx -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: movl (%edi), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, %ebx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: notl %edi -; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: shll %cl, %eax -; X86-NEXT: orl %eax, %edi -; X86-NEXT: movl %edi, %ecx +; X86-NEXT: movl %eax, 32(%edx) ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: movl 
8(%ebp), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 60(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 56(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 52(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 44(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 40(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 36(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 32(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 28(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 24(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 20(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 16(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 12(%eax) -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl %edi, 8(%eax) -; X86-NEXT: movl %edx, 4(%eax) -; X86-NEXT: movl %ecx, (%eax) -; X86-NEXT: movl %esi, 48(%eax) -; X86-NEXT: sete %al +; X86-NEXT: movl %eax, 28(%edx) +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; X86-NEXT: movl %eax, 24(%edx) +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; X86-NEXT: movl %eax, 20(%edx) +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; X86-NEXT: movl %eax, 16(%edx) +; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; X86-NEXT: movl %eax, 12(%edx) +; X86-NEXT: movl %ebx, 8(%edx) +; X86-NEXT: movl %edi, 4(%edx) +; X86-NEXT: movl %esi, (%edx) +; X86-NEXT: setae %al ; X86-NEXT: leal -12(%ebp), %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi @@ -3816,7 +1286,8 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; SSE-NEXT: pushq %r13 ; SSE-NEXT: pushq %r12 ; SSE-NEXT: pushq %rbx -; SSE-NEXT: subq $216, %rsp +; SSE-NEXT: subq $184, %rsp +; SSE-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) ; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) @@ -3829,139 +1300,103 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; SSE-NEXT: movq $1, {{[0-9]+}}(%rsp) ; SSE-NEXT: movl %esi, %ecx ; SSE-NEXT: andl $63, %ecx -; SSE-NEXT: shrl $3, %esi -; SSE-NEXT: andl $56, %esi -; SSE-NEXT: negl %esi -; SSE-NEXT: movslq %esi, %r10 -; SSE-NEXT: movq 184(%rsp,%r10), %r11 -; SSE-NEXT: movq 192(%rsp,%r10), %rsi -; SSE-NEXT: movq %rsi, %r13 -; SSE-NEXT: shldq %cl, %r11, %r13 -; SSE-NEXT: movq 200(%rsp,%r10), %r15 -; SSE-NEXT: shldq %cl, %rsi, %r15 -; SSE-NEXT: movq 168(%rsp,%r10), %rbx -; SSE-NEXT: movq 176(%rsp,%r10), %rsi -; SSE-NEXT: movq %rsi, %r14 -; SSE-NEXT: shldq %cl, %rbx, %r14 -; SSE-NEXT: shldq %cl, %rsi, %r11 -; SSE-NEXT: movq 152(%rsp,%r10), %rax -; SSE-NEXT: movq 160(%rsp,%r10), %r8 -; SSE-NEXT: movq %r8, %r12 -; SSE-NEXT: shldq %cl, %rax, %r12 -; SSE-NEXT: shldq %cl, %r8, %rbx -; SSE-NEXT: movq 144(%rsp,%r10), %r9 -; SSE-NEXT: movq %r9, %r8 -; SSE-NEXT: shlq %cl, %r8 -; SSE-NEXT: shldq %cl, %r9, %rax -; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 
8-byte Spill -; SSE-NEXT: movl %edx, %edx -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) +; SSE-NEXT: movl %esi, %eax +; SSE-NEXT: shrl $3, %eax +; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: # kill: def $eax killed $eax killed $rax +; SSE-NEXT: andl $56, %eax +; SSE-NEXT: negl %eax +; SSE-NEXT: movslq %eax, %r12 +; SSE-NEXT: movq 160(%rsp,%r12), %rax +; SSE-NEXT: movq 168(%rsp,%r12), %r10 +; SSE-NEXT: shldq %cl, %rax, %r10 +; SSE-NEXT: movq 152(%rsp,%r12), %rsi +; SSE-NEXT: shldq %cl, %rsi, %rax +; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 144(%rsp,%r12), %r11 +; SSE-NEXT: shldq %cl, %r11, %rsi +; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 136(%rsp,%r12), %rbx +; SSE-NEXT: shldq %cl, %rbx, %r11 +; SSE-NEXT: movq 128(%rsp,%r12), %r14 +; SSE-NEXT: shldq %cl, %r14, %rbx +; SSE-NEXT: movq 120(%rsp,%r12), %r15 +; SSE-NEXT: shldq %cl, %r15, %r14 +; SSE-NEXT: movq 112(%rsp,%r12), %r13 +; SSE-NEXT: shldq %cl, %r13, %r15 +; SSE-NEXT: shlq %cl, %r13 +; SSE-NEXT: movl %edx, %eax ; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) ; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, (%rsp) +; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp) +; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE-NEXT: movq %rdx, {{[0-9]+}}(%rsp) +; SSE-NEXT: movq %rax, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movq 16(%rdi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 48(%rdi), %rsi -; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rsi, %r13 -; SSE-NEXT: andq %rdx, %r12 -; SSE-NEXT: orq %r13, %r12 -; SSE-NEXT: movq %r15, %rsi -; SSE-NEXT: movq 56(%rdi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rdx, %r15 -; SSE-NEXT: movq %rbx, %r13 -; SSE-NEXT: movq 24(%rdi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rdx, %rbx -; SSE-NEXT: orq %r15, %rbx -; SSE-NEXT: movq %r14, %rbp -; SSE-NEXT: movq 32(%rdi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rdx, %r14 -; SSE-NEXT: movq %r8, %r15 -; SSE-NEXT: movq (%rdi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rdx, %r8 -; SSE-NEXT: orq %r14, %r8 -; SSE-NEXT: orq %r12, %r8 -; SSE-NEXT: movq %r11, %r12 -; SSE-NEXT: movq 40(%rdi), %r9 -; SSE-NEXT: andq %r9, %r11 -; SSE-NEXT: movq %rax, %r14 -; SSE-NEXT: movq 8(%rdi), %rdx +; SSE-NEXT: movq 32(%rsp,%r12), %rax +; SSE-NEXT: movq 40(%rsp,%r12), %rdx +; SSE-NEXT: shldq %cl, %rax, %rdx ; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq %rdx, %rax -; SSE-NEXT: orq %r11, %rax -; SSE-NEXT: orq %rbx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; SSE-NEXT: movq 24(%rsp,%r12), %rdx +; SSE-NEXT: shldq %cl, %rdx, %rax +; SSE-NEXT: movq 16(%rsp,%r12), %rsi +; SSE-NEXT: shldq %cl, %rsi, %rdx +; SSE-NEXT: movq 8(%rsp,%r12), %r8 +; SSE-NEXT: shldq %cl, %r8, %rsi +; SSE-NEXT: movq (%rsp,%r12), %rbp +; SSE-NEXT: shldq %cl, %rbp, %r8 +; SSE-NEXT: movq -8(%rsp,%r12), %r9 +; SSE-NEXT: shldq %cl, %r9, %rbp +; SSE-NEXT: notq %r10 +; SSE-NEXT: andq 56(%rdi), %r10 +; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload +; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; 
SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload +; SSE-NEXT: notq %r10 +; SSE-NEXT: andq 48(%rdi), %r10 +; SSE-NEXT: orq %rax, %r10 ; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; SSE-NEXT: notq %rax -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; SSE-NEXT: andq 40(%rdi), %rax +; SSE-NEXT: orq %rdx, %rax ; SSE-NEXT: movq %rax, %rdx -; SSE-NEXT: movq 56(%rsp,%r10), %r11 -; SSE-NEXT: movq 64(%rsp,%r10), %rax -; SSE-NEXT: movq %rax, %rbx -; SSE-NEXT: shldq %cl, %r11, %rbx -; SSE-NEXT: orq %rbx, %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: notq %rsi -; SSE-NEXT: movq 72(%rsp,%r10), %rbx -; SSE-NEXT: shldq %cl, %rax, %rbx -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload -; SSE-NEXT: orq %rbx, %rsi -; SSE-NEXT: notq %rbp -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload -; SSE-NEXT: movq 40(%rsp,%r10), %rax -; SSE-NEXT: movq 48(%rsp,%r10), %rdx -; SSE-NEXT: movq %rdx, %rbx -; SSE-NEXT: shldq %cl, %rax, %rbx -; SSE-NEXT: orq %rbx, %rbp -; SSE-NEXT: notq %r12 -; SSE-NEXT: andq %r9, %r12 -; SSE-NEXT: shldq %cl, %rdx, %r11 -; SSE-NEXT: movq 24(%rsp,%r10), %r9 -; SSE-NEXT: movq 32(%rsp,%r10), %rdx -; SSE-NEXT: movq %rdx, %rbx -; SSE-NEXT: shldq %cl, %r9, %rbx -; SSE-NEXT: orq %r11, %r12 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload ; SSE-NEXT: notq %r11 -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: orq %rbx, %r11 -; SSE-NEXT: notq %r13 -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload -; SSE-NEXT: orq %rax, %r13 -; SSE-NEXT: notq %r15 -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload -; SSE-NEXT: movq 16(%rsp,%r10), %rax -; SSE-NEXT: movq %rax, %rdx -; SSE-NEXT: shlq %cl, %rdx -; SSE-NEXT: orq %rdx, %r15 +; SSE-NEXT: andq 32(%rdi), %r11 +; SSE-NEXT: orq %rsi, %r11 +; SSE-NEXT: notq %rbx +; SSE-NEXT: andq 24(%rdi), %rbx +; SSE-NEXT: orq %r8, %rbx ; SSE-NEXT: notq %r14 -; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload -; SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; SSE-NEXT: andq 16(%rdi), %r14 +; SSE-NEXT: orq %rbp, %r14 +; SSE-NEXT: notq %r15 +; SSE-NEXT: movq -16(%rsp,%r12), %rax ; SSE-NEXT: shldq %cl, %rax, %r9 -; SSE-NEXT: orq %r9, %r14 -; SSE-NEXT: orq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; SSE-NEXT: andq 8(%rdi), %r15 +; SSE-NEXT: orq %r9, %r15 +; SSE-NEXT: notq %r13 +; SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; SSE-NEXT: shlq %cl, %rax +; SSE-NEXT: andq (%rdi), %r13 +; SSE-NEXT: orq %rax, %r13 +; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; SSE-NEXT: andl $60, %eax +; SSE-NEXT: movl (%rdi,%rax), %eax +; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; SSE-NEXT: btl %ecx, %eax ; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: movq %rax, 48(%rdi) -; SSE-NEXT: movq %rsi, 56(%rdi) -; SSE-NEXT: movq %rbp, 32(%rdi) -; SSE-NEXT: movq %r12, 40(%rdi) -; SSE-NEXT: movq %r11, 16(%rdi) -; SSE-NEXT: movq %r13, 24(%rdi) -; SSE-NEXT: movq %r15, (%rdi) -; SSE-NEXT: movq %r14, 8(%rdi) -; SSE-NEXT: sete %al -; SSE-NEXT: addq $216, %rsp +; SSE-NEXT: movq %rax, 56(%rdi) +; SSE-NEXT: movq %r10, 48(%rdi) +; SSE-NEXT: movq %rdx, 40(%rdi) +; SSE-NEXT: movq %r11, 32(%rdi) +; SSE-NEXT: movq %rbx, 24(%rdi) +; SSE-NEXT: movq %r14, 16(%rdi) +; SSE-NEXT: movq %r15, 8(%rdi) +; SSE-NEXT: movq %r13, (%rdi) +; SSE-NEXT: setae %al +; 
SSE-NEXT: addq $184, %rsp ; SSE-NEXT: popq %rbx ; SSE-NEXT: popq %r12 ; SSE-NEXT: popq %r13 @@ -3978,132 +1413,103 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; AVX2-NEXT: pushq %r13 ; AVX2-NEXT: pushq %r12 ; AVX2-NEXT: pushq %rbx -; AVX2-NEXT: subq $200, %rsp +; AVX2-NEXT: subq $168, %rsp ; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) ; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) ; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) ; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [1,0,0,0] ; AVX2-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) -; AVX2-NEXT: movl %esi, %r8d -; AVX2-NEXT: andl $63, %r8d -; AVX2-NEXT: shrl $3, %esi -; AVX2-NEXT: andl $56, %esi -; AVX2-NEXT: negl %esi -; AVX2-NEXT: movslq %esi, %rsi -; AVX2-NEXT: movq 144(%rsp,%rsi), %r11 -; AVX2-NEXT: movq 152(%rsp,%rsi), %r12 -; AVX2-NEXT: movq %r12, %r10 -; AVX2-NEXT: movl %r8d, %ecx -; AVX2-NEXT: shldq %cl, %r11, %r10 -; AVX2-NEXT: movq 176(%rsp,%rsi), %r14 -; AVX2-NEXT: movq 184(%rsp,%rsi), %r9 -; AVX2-NEXT: shldq %cl, %r14, %r9 -; AVX2-NEXT: movq 160(%rsp,%rsi), %r15 -; AVX2-NEXT: movq 168(%rsp,%rsi), %r13 -; AVX2-NEXT: movq %r13, %rbx -; AVX2-NEXT: shldq %cl, %r15, %rbx -; AVX2-NEXT: movq 128(%rsp,%rsi), %rbp -; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 136(%rsp,%rsi), %rax -; AVX2-NEXT: shldq %cl, %rax, %r11 -; AVX2-NEXT: shldq %cl, %r13, %r14 -; AVX2-NEXT: shldq %cl, %r12, %r15 -; AVX2-NEXT: shldq %cl, %rbp, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movl %edx, %edx +; AVX2-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: movl %esi, %ecx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: movl %esi, %r11d +; AVX2-NEXT: shrl $3, %r11d +; AVX2-NEXT: movl %r11d, %eax +; AVX2-NEXT: andl $56, %eax +; AVX2-NEXT: negl %eax +; AVX2-NEXT: movslq %eax, %r10 +; AVX2-NEXT: movq 104(%rsp,%r10), %r15 +; AVX2-NEXT: movq 112(%rsp,%r10), %rax +; AVX2-NEXT: movq %rax, %rsi +; AVX2-NEXT: shldq %cl, %r15, %rsi +; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq 120(%rsp,%r10), %rsi +; AVX2-NEXT: movq %rsi, %r8 +; AVX2-NEXT: shldq %cl, %rax, %r8 +; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq 128(%rsp,%r10), %rax +; AVX2-NEXT: movq %rax, %rbx +; AVX2-NEXT: shldq %cl, %rsi, %rbx +; AVX2-NEXT: movq 136(%rsp,%r10), %rsi +; AVX2-NEXT: movq %rsi, %r14 +; AVX2-NEXT: shldq %cl, %rax, %r14 +; AVX2-NEXT: movq 144(%rsp,%r10), %rax +; AVX2-NEXT: movq %rax, %r12 +; AVX2-NEXT: shldq %cl, %rsi, %r12 +; AVX2-NEXT: movq 96(%rsp,%r10), %rsi +; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX2-NEXT: movq 152(%rsp,%r10), %r13 +; AVX2-NEXT: shldq %cl, %rax, %r13 +; AVX2-NEXT: shldq %cl, %rsi, %r15 +; AVX2-NEXT: movl %edx, %eax ; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vmovups %xmm1, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) ; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) ; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: movq %rdx, (%rsp) +; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) ; AVX2-NEXT: movq $0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: movq 16(%rdi), %r12 -; AVX2-NEXT: movq 48(%rdi), %rbp -; AVX2-NEXT: movq 32(%rdi), %r13 -; AVX2-NEXT: andnq %r13, %r15, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq %r15, %r13 -; AVX2-NEXT: andnq %rbp, %r14, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; 
AVX2-NEXT: andq %r14, %rbp -; AVX2-NEXT: andnq %r12, %r11, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq %r11, %r12 -; AVX2-NEXT: movq 40(%rdi), %rax +; AVX2-NEXT: movq 16(%rsp,%r10), %rbp +; AVX2-NEXT: movq 24(%rsp,%r10), %r9 +; AVX2-NEXT: shldq %cl, %rbp, %r9 +; AVX2-NEXT: movq 8(%rsp,%r10), %rdx +; AVX2-NEXT: shldq %cl, %rdx, %rbp +; AVX2-NEXT: movq (%rsp,%r10), %rax +; AVX2-NEXT: shldq %cl, %rax, %rdx +; AVX2-NEXT: movq -8(%rsp,%r10), %r8 +; AVX2-NEXT: shldq %cl, %r8, %rax +; AVX2-NEXT: movq -16(%rsp,%r10), %rsi +; AVX2-NEXT: shldq %cl, %rsi, %r8 +; AVX2-NEXT: andnq 56(%rdi), %r13, %r13 +; AVX2-NEXT: orq %r9, %r13 +; AVX2-NEXT: movq -24(%rsp,%r10), %r9 +; AVX2-NEXT: shldq %cl, %r9, %rsi +; AVX2-NEXT: andnq 48(%rdi), %r12, %r12 +; AVX2-NEXT: andnq 40(%rdi), %r14, %r14 ; AVX2-NEXT: orq %rbp, %r12 -; AVX2-NEXT: andnq %rax, %rbx, %rcx -; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq %rax, %rbp -; AVX2-NEXT: andq %rbx, %rbp -; AVX2-NEXT: movq 56(%rdi), %rcx -; AVX2-NEXT: andnq %rcx, %r9, %rbx -; AVX2-NEXT: andq %r9, %rcx -; AVX2-NEXT: movq 24(%rdi), %rax -; AVX2-NEXT: andnq %rax, %r10, %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq %r10, %rax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: shlxq %r8, {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload -; AVX2-NEXT: movq (%rdi), %r10 -; AVX2-NEXT: andnq %r10, %rcx, %r15 -; AVX2-NEXT: andq %rcx, %r10 -; AVX2-NEXT: movq 40(%rsp,%rsi), %rdx -; AVX2-NEXT: movq 48(%rsp,%rsi), %r11 -; AVX2-NEXT: movq %r11, %r9 -; AVX2-NEXT: movl %r8d, %ecx -; AVX2-NEXT: shldq %cl, %rdx, %r9 -; AVX2-NEXT: orq %r13, %r10 -; AVX2-NEXT: orq %r12, %r10 -; AVX2-NEXT: movq 8(%rdi), %r13 +; AVX2-NEXT: orq %rdx, %r14 +; AVX2-NEXT: andnq 32(%rdi), %rbx, %rdx +; AVX2-NEXT: orq %rax, %rdx +; AVX2-NEXT: shlxq %rcx, {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; AVX2-NEXT: movq -32(%rsp,%r10), %r10 +; AVX2-NEXT: shlxq %rcx, %r10, %rbx +; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; AVX2-NEXT: shldq %cl, %r10, %r9 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload -; AVX2-NEXT: andnq %r13, %rcx, %r12 -; AVX2-NEXT: andq %rcx, %r13 -; AVX2-NEXT: orq %rbp, %r13 -; AVX2-NEXT: orq %rax, %r13 -; AVX2-NEXT: movq 56(%rsp,%rsi), %rax -; AVX2-NEXT: movl %r8d, %ecx -; AVX2-NEXT: shldq %cl, %r11, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: orq %r9, %r14 -; AVX2-NEXT: orq %rax, %rbx -; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 24(%rsp,%rsi), %rax -; AVX2-NEXT: movq 32(%rsp,%rsi), %r9 -; AVX2-NEXT: movq %r9, %r11 -; AVX2-NEXT: shldq %cl, %rax, %r11 -; AVX2-NEXT: shldq %cl, %r9, %rdx -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; AVX2-NEXT: orq %r11, %rbp -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX2-NEXT: orq %rdx, %rbx -; AVX2-NEXT: movq 8(%rsp,%rsi), %rdx -; AVX2-NEXT: movq 16(%rsp,%rsi), %r9 -; AVX2-NEXT: movq %r9, %r11 -; AVX2-NEXT: shldq %cl, %rdx, %r11 -; AVX2-NEXT: shldq %cl, %r9, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX2-NEXT: orq %r11, %r9 -; AVX2-NEXT: movq (%rsp,%rsi), %rsi -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; AVX2-NEXT: orq %rax, %r11 -; AVX2-NEXT: shlxq %r8, %rsi, %rax -; AVX2-NEXT: shldq %cl, %rsi, %rdx -; AVX2-NEXT: orq %rax, %r15 -; AVX2-NEXT: orq %rdx, %r12 -; AVX2-NEXT: orq %r10, %r13 -; AVX2-NEXT: movq %r14, 48(%rdi) -; AVX2-NEXT: 
movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: movq %rax, 56(%rdi) -; AVX2-NEXT: movq %rbp, 32(%rdi) -; AVX2-NEXT: movq %rbx, 40(%rdi) -; AVX2-NEXT: movq %r9, 16(%rdi) -; AVX2-NEXT: movq %r11, 24(%rdi) -; AVX2-NEXT: movq %r15, (%rdi) -; AVX2-NEXT: movq %r12, 8(%rdi) -; AVX2-NEXT: sete %al -; AVX2-NEXT: addq $200, %rsp +; AVX2-NEXT: andnq 24(%rdi), %rcx, %rcx +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload +; AVX2-NEXT: andnq 16(%rdi), %r10, %r10 +; AVX2-NEXT: orq %r8, %rcx +; AVX2-NEXT: orq %rsi, %r10 +; AVX2-NEXT: andnq 8(%rdi), %r15, %rsi +; AVX2-NEXT: orq %r9, %rsi +; AVX2-NEXT: andnq (%rdi), %rax, %rax +; AVX2-NEXT: orq %rbx, %rax +; AVX2-NEXT: andl $60, %r11d +; AVX2-NEXT: movl (%rdi,%r11), %r8d +; AVX2-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %r9d # 4-byte Reload +; AVX2-NEXT: btl %r9d, %r8d +; AVX2-NEXT: movq %r13, 56(%rdi) +; AVX2-NEXT: movq %r12, 48(%rdi) +; AVX2-NEXT: movq %r14, 40(%rdi) +; AVX2-NEXT: movq %rdx, 32(%rdi) +; AVX2-NEXT: movq %rcx, 24(%rdi) +; AVX2-NEXT: movq %r10, 16(%rdi) +; AVX2-NEXT: movq %rsi, 8(%rdi) +; AVX2-NEXT: movq %rax, (%rdi) +; AVX2-NEXT: setae %al +; AVX2-NEXT: addq $168, %rsp ; AVX2-NEXT: popq %rbx ; AVX2-NEXT: popq %r12 ; AVX2-NEXT: popq %r13 @@ -4121,131 +1527,100 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { ; AVX512-NEXT: pushq %r13 ; AVX512-NEXT: pushq %r12 ; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: subq $184, %rsp +; AVX512-NEXT: subq $152, %rsp ; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) ; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) ; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) ; AVX512-NEXT: vmovaps {{.*#+}} xmm1 = [1,0,0,0] ; AVX512-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) +; AVX512-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; AVX512-NEXT: movl %esi, %ecx ; AVX512-NEXT: andl $63, %ecx -; AVX512-NEXT: shrl $3, %esi -; AVX512-NEXT: andl $56, %esi -; AVX512-NEXT: negl %esi -; AVX512-NEXT: movslq %esi, %rsi -; AVX512-NEXT: movq 128(%rsp,%rsi), %r10 -; AVX512-NEXT: movq 136(%rsp,%rsi), %r12 -; AVX512-NEXT: movq %r12, %rax -; AVX512-NEXT: shldq %cl, %r10, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 160(%rsp,%rsi), %r14 -; AVX512-NEXT: movq 168(%rsp,%rsi), %rax -; AVX512-NEXT: shldq %cl, %r14, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 144(%rsp,%rsi), %r15 -; AVX512-NEXT: movq 152(%rsp,%rsi), %r11 -; AVX512-NEXT: movq %r11, %rbx -; AVX512-NEXT: shldq %cl, %r15, %rbx -; AVX512-NEXT: movq 120(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rax, %r10 -; AVX512-NEXT: shldq %cl, %r11, %r14 -; AVX512-NEXT: movq %rdi, %r9 -; AVX512-NEXT: movq 112(%rsp,%rsi), %r11 -; AVX512-NEXT: shldq %cl, %r12, %r15 -; AVX512-NEXT: movl %edx, %edx +; AVX512-NEXT: movl %esi, %r8d +; AVX512-NEXT: shrl $3, %r8d +; AVX512-NEXT: movl %r8d, %eax +; AVX512-NEXT: andl $56, %eax +; AVX512-NEXT: negl %eax +; AVX512-NEXT: movslq %eax, %r9 +; AVX512-NEXT: movq 88(%rsp,%r9), %r10 +; AVX512-NEXT: movq 96(%rsp,%r9), %rax +; AVX512-NEXT: movq %rax, %rsi +; AVX512-NEXT: shldq %cl, %r10, %rsi +; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq 104(%rsp,%r9), %rsi +; AVX512-NEXT: movq %rsi, %r11 +; AVX512-NEXT: shldq %cl, %rax, %r11 +; AVX512-NEXT: movq 112(%rsp,%r9), %rax +; AVX512-NEXT: movq %rax, %rbx +; AVX512-NEXT: shldq %cl, %rsi, %rbx +; AVX512-NEXT: movq 
120(%rsp,%r9), %rsi +; AVX512-NEXT: movq %rsi, %r14 +; AVX512-NEXT: shldq %cl, %rax, %r14 +; AVX512-NEXT: movq 128(%rsp,%r9), %rax +; AVX512-NEXT: movq %rax, %r12 +; AVX512-NEXT: shldq %cl, %rsi, %r12 +; AVX512-NEXT: movq 136(%rsp,%r9), %r13 +; AVX512-NEXT: shldq %cl, %rax, %r13 +; AVX512-NEXT: movq 80(%rsp,%r9), %r15 +; AVX512-NEXT: shldq %cl, %r15, %r10 +; AVX512-NEXT: movl %edx, %eax ; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; AVX512-NEXT: vmovups %xmm1, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovups %xmm1, -{{[0-9]+}}(%rsp) ; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) ; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) ; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) -; AVX512-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) ; AVX512-NEXT: movq $0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: movq 16(%rdi), %r12 -; AVX512-NEXT: movq 48(%rdi), %r13 -; AVX512-NEXT: movq 32(%rdi), %rbp -; AVX512-NEXT: andnq %rbp, %r15, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq %r15, %rbp -; AVX512-NEXT: andnq %r13, %r14, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq %r14, %r13 -; AVX512-NEXT: andnq %r12, %r10, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq %r10, %r12 -; AVX512-NEXT: movq 40(%rdi), %r8 -; AVX512-NEXT: orq %r13, %r12 -; AVX512-NEXT: andnq %r8, %rbx, %rdi -; AVX512-NEXT: andq %rbx, %r8 -; AVX512-NEXT: movq 56(%r9), %r13 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; AVX512-NEXT: andnq %r13, %rdx, %r10 -; AVX512-NEXT: andq %rdx, %r13 -; AVX512-NEXT: movq 24(%r9), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; AVX512-NEXT: andnq %rax, %rdx, %r15 -; AVX512-NEXT: andq %rdx, %rax -; AVX512-NEXT: orq %r13, %rax -; AVX512-NEXT: shlxq %rcx, %r11, %r13 -; AVX512-NEXT: movq (%r9), %rdx -; AVX512-NEXT: andnq %rdx, %r13, %r14 -; AVX512-NEXT: andq %r13, %rdx -; AVX512-NEXT: orq %rbp, %rdx -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r11, %rbp -; AVX512-NEXT: orq %r12, %rdx -; AVX512-NEXT: movq 8(%r9), %r13 -; AVX512-NEXT: andnq %r13, %rbp, %rbx -; AVX512-NEXT: andq %rbp, %r13 -; AVX512-NEXT: orq %r8, %r13 -; AVX512-NEXT: movq 24(%rsp,%rsi), %r8 -; AVX512-NEXT: orq %rax, %r13 -; AVX512-NEXT: movq 32(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, %r12 -; AVX512-NEXT: shldq %cl, %r8, %r12 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; AVX512-NEXT: orq %r12, %r11 -; AVX512-NEXT: movq 40(%rsp,%rsi), %r12 -; AVX512-NEXT: shldq %cl, %rax, %r12 -; AVX512-NEXT: orq %r12, %r10 -; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 8(%rsp,%rsi), %rax -; AVX512-NEXT: movq 16(%rsp,%rsi), %r12 -; AVX512-NEXT: movq %r12, %rbp -; AVX512-NEXT: shldq %cl, %rax, %rbp -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX512-NEXT: orq %rbp, %r10 -; AVX512-NEXT: shldq %cl, %r12, %r8 -; AVX512-NEXT: orq %r8, %rdi -; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq -8(%rsp,%rsi), %r8 -; AVX512-NEXT: movq (%rsp,%rsi), %r12 -; AVX512-NEXT: movq %r12, %rbp -; AVX512-NEXT: shldq %cl, %r8, %rbp -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload -; AVX512-NEXT: orq %rbp, %rdi -; AVX512-NEXT: movq -16(%rsp,%rsi), %rsi -; AVX512-NEXT: shldq %cl, %r12, %rax +; AVX512-NEXT: movq (%rsp,%r9), %rbp +; AVX512-NEXT: movq 8(%rsp,%r9), %rsi +; AVX512-NEXT: shldq 
%cl, %rbp, %rsi +; AVX512-NEXT: movq -8(%rsp,%r9), %rdx +; AVX512-NEXT: shldq %cl, %rdx, %rbp +; AVX512-NEXT: movq -16(%rsp,%r9), %rax +; AVX512-NEXT: shldq %cl, %rax, %rdx +; AVX512-NEXT: andnq 56(%rdi), %r13, %r13 +; AVX512-NEXT: andnq 48(%rdi), %r12, %r12 +; AVX512-NEXT: orq %rsi, %r13 +; AVX512-NEXT: orq %rbp, %r12 +; AVX512-NEXT: andnq 40(%rdi), %r14, %r14 +; AVX512-NEXT: orq %rdx, %r14 +; AVX512-NEXT: movq -24(%rsp,%r9), %rsi +; AVX512-NEXT: shldq %cl, %rsi, %rax +; AVX512-NEXT: andnq 32(%rdi), %rbx, %rdx +; AVX512-NEXT: orq %rax, %rdx +; AVX512-NEXT: movq -32(%rsp,%r9), %rax +; AVX512-NEXT: shldq %cl, %rax, %rsi +; AVX512-NEXT: shlxq %rcx, %r15, %rbx +; AVX512-NEXT: andnq 24(%rdi), %r11, %r11 +; AVX512-NEXT: orq %rsi, %r11 +; AVX512-NEXT: movq -48(%rsp,%r9), %rsi +; AVX512-NEXT: movq -40(%rsp,%r9), %r9 +; AVX512-NEXT: shldq %cl, %r9, %rax +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload +; AVX512-NEXT: andnq 16(%rdi), %r15, %r15 ; AVX512-NEXT: orq %rax, %r15 ; AVX512-NEXT: shlxq %rcx, %rsi, %rax ; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx -; AVX512-NEXT: shldq %cl, %rsi, %r8 -; AVX512-NEXT: orq %rax, %r14 -; AVX512-NEXT: orq %r8, %rbx -; AVX512-NEXT: orq %rdx, %r13 -; AVX512-NEXT: movq %r11, 48(%r9) -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: movq %rax, 56(%r9) -; AVX512-NEXT: movq %r10, 32(%r9) -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: movq %rax, 40(%r9) -; AVX512-NEXT: movq %rdi, 16(%r9) -; AVX512-NEXT: movq %r15, 24(%r9) -; AVX512-NEXT: movq %r14, (%r9) -; AVX512-NEXT: movq %rbx, 8(%r9) -; AVX512-NEXT: sete %al -; AVX512-NEXT: addq $184, %rsp +; AVX512-NEXT: shldq %cl, %rsi, %r9 +; AVX512-NEXT: andnq 8(%rdi), %r10, %rcx +; AVX512-NEXT: orq %r9, %rcx +; AVX512-NEXT: andnq (%rdi), %rbx, %rsi +; AVX512-NEXT: orq %rax, %rsi +; AVX512-NEXT: andl $60, %r8d +; AVX512-NEXT: movl (%rdi,%r8), %eax +; AVX512-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 4-byte Reload +; AVX512-NEXT: btl %r8d, %eax +; AVX512-NEXT: movq %r13, 56(%rdi) +; AVX512-NEXT: movq %r12, 48(%rdi) +; AVX512-NEXT: movq %r14, 40(%rdi) +; AVX512-NEXT: movq %rdx, 32(%rdi) +; AVX512-NEXT: movq %r11, 24(%rdi) +; AVX512-NEXT: movq %r15, 16(%rdi) +; AVX512-NEXT: movq %rcx, 8(%rdi) +; AVX512-NEXT: movq %rsi, (%rdi) +; AVX512-NEXT: setae %al +; AVX512-NEXT: addq $152, %rsp ; AVX512-NEXT: popq %rbx ; AVX512-NEXT: popq %r12 ; AVX512-NEXT: popq %r13 @@ -4274,2749 +1649,25 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind { define i1 @test_ne_i4096(ptr %word, i32 %position) nounwind { ; X86-LABEL: test_ne_i4096: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: movl %esp, %ebp -; X86-NEXT: pushl %ebx -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: andl $-16, %esp -; X86-NEXT: subl $1792, %esp # imm = 0x700 -; X86-NEXT: movl 12(%ebp), %ebx -; X86-NEXT: movl %ebx, %ecx -; X86-NEXT: shrl $3, %ecx -; X86-NEXT: andl $508, %ecx # imm = 0x1FC -; X86-NEXT: leal {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: subl %ecx, %esi -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, 
{{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, 
{{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $1, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, 
{{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl $0, {{[0-9]+}}(%esp) -; X86-NEXT: movl 248(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 252(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: andl $31, %ebx -; X86-NEXT: movl %ebx, %ecx -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 504(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 508(%esi), %edx -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 120(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 124(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 376(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 380(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; 
X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 184(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 188(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 440(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 444(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 56(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 60(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 312(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 316(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 216(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 220(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 472(%esi), %edi -; X86-NEXT: movl 476(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edi, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 88(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 92(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 344(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 348(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 152(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 156(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 408(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 412(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 24(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 28(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 280(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 284(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 
4-byte Spill -; X86-NEXT: movl 232(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 236(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 488(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 492(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 104(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 108(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 360(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 364(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 168(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 172(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 424(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 428(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 40(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 44(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 296(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 300(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 200(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 204(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 456(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 460(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 72(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 76(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 328(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 332(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; 
X86-NEXT: movl 136(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 140(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 392(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 396(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 12(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 264(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 268(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 240(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 244(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 496(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 500(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 112(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 116(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 368(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 372(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 176(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 180(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 432(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 436(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 48(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 52(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 304(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 308(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 
208(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 212(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 464(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 468(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 80(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 84(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 336(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 340(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 144(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 148(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 400(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 404(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 16(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 20(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 272(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 276(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 224(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 228(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 480(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 484(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 96(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 100(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 352(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 356(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 160(%esi), %edx -; 
X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 164(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 416(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 420(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 32(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 36(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 288(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 292(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 192(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 196(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 448(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 452(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 64(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 68(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 320(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 324(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 128(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 132(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %edi, %edx -; X86-NEXT: movl 256(%esi), %edi -; X86-NEXT: movl 260(%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edi, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: shldl %cl, %edi, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, 
{{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: 
shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl 388(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl 4(%esi), %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl 
{{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: shrdl $1, %eax, %edi -; X86-NEXT: shrl %eax -; X86-NEXT: movl %ebx, %edx -; X86-NEXT: movl %eax, %ebx -; X86-NEXT: notb %cl -; X86-NEXT: shrdl %cl, %eax, %edi -; X86-NEXT: shrl %cl, %ebx -; X86-NEXT: movb $32, %cl -; X86-NEXT: testb %cl, %cl -; X86-NEXT: movl (%esi), %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl 8(%ebp), %eax -; X86-NEXT: jne .LBB20_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %edi, %ebx -; X86-NEXT: .LBB20_2: -; X86-NEXT: movl %edx, %ecx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: shll %cl, %edx -; X86-NEXT: orl %ebx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 320(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 64(%eax), %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 448(%eax), %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 192(%eax), %ecx -; X86-NEXT: orl %edx, %ecx -; X86-NEXT: orl %esi, %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 288(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 32(%eax), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 416(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 160(%eax), %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 352(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 96(%eax), %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 480(%eax), %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 224(%eax), %ecx -; X86-NEXT: orl %edx, %ecx -; X86-NEXT: orl 
%esi, %ecx -; X86-NEXT: orl %edi, %ecx -; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 272(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 16(%eax), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 400(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 144(%eax), %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 336(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 80(%eax), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 464(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 208(%eax), %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: orl %esi, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 304(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 48(%eax), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 432(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 176(%eax), %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 368(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 112(%eax), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 496(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: andl 240(%eax), %ebx -; X86-NEXT: orl %ecx, %ebx -; X86-NEXT: orl %edx, %ebx -; X86-NEXT: orl %esi, %ebx -; X86-NEXT: orl %edi, %ebx -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 264(%eax), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 8(%eax), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl %eax, %ebx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 392(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 136(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: orl %edx, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 328(%ebx), %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 72(%ebx), %eax -; X86-NEXT: orl %edx, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 456(%ebx), %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 200(%ebx), %esi -; X86-NEXT: orl %edi, %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 296(%ebx), %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 40(%ebx), %eax -; X86-NEXT: orl %edi, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 424(%ebx), %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx 
# 4-byte Reload -; X86-NEXT: andl 168(%ebx), %edx -; X86-NEXT: orl %edi, %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 360(%ebx), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 104(%ebx), %eax -; X86-NEXT: orl %ecx, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 488(%ebx), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 232(%ebx), %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: orl %eax, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: orl %esi, %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 280(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 24(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 408(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 152(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 344(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 88(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 472(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 216(%ebx), %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 312(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 56(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 440(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 184(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 376(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 120(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 504(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 248(%ebx), %edi -; X86-NEXT: orl %eax, %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: orl %esi, %edi -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 324(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 68(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 452(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 196(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 292(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; 
X86-NEXT: andl 36(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 420(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 164(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 356(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 100(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 484(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 228(%ebx), %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 276(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 20(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 404(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 148(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 340(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 84(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 468(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 212(%ebx), %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 308(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 52(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 436(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 180(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 372(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 116(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 500(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 244(%ebx), %edi -; X86-NEXT: orl %eax, %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: orl %esi, %edi -; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 268(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 12(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 396(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 140(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 332(%ebx), %eax -; X86-NEXT: movl 
{{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 76(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 460(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 204(%ebx), %edi -; X86-NEXT: orl %eax, %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 300(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 44(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 428(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 172(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 364(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 108(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 492(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload -; X86-NEXT: andl 236(%ebx), %esi -; X86-NEXT: orl %eax, %esi -; X86-NEXT: orl %ecx, %esi -; X86-NEXT: orl %edx, %esi -; X86-NEXT: orl %edi, %esi -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 284(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 28(%ebx), %ecx -; X86-NEXT: orl %eax, %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 412(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 156(%ebx), %edi -; X86-NEXT: orl %eax, %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 348(%ebx), %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 92(%ebx), %edx -; X86-NEXT: orl %eax, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 476(%ebx), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-NEXT: andl 220(%ebx), %eax -; X86-NEXT: orl %ecx, %eax -; X86-NEXT: orl %edx, %eax -; X86-NEXT: orl %edi, %eax -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 316(%ebx), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 60(%ebx), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 444(%ebx), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload -; X86-NEXT: andl 188(%ebx), %edi -; X86-NEXT: orl %ecx, %edi -; X86-NEXT: orl %edx, %edi -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 380(%ebx), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload -; X86-NEXT: andl 124(%ebx), %edx -; X86-NEXT: orl %ecx, %edx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload -; X86-NEXT: andl 508(%ebx), %ecx -; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-NEXT: movl 8(%ebp), %esi -; X86-NEXT: andl 252(%esi), %ebx -; X86-NEXT: orl %ecx, %ebx -; X86-NEXT: orl %edx, %ebx -; X86-NEXT: orl %edi, %ebx -; X86-NEXT: orl %eax, %ebx -; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 
4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: negl %ecx
-; X86-NEXT: movl 1648(%esp,%ecx), %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: andl 128(%edx), %ecx
-; X86-NEXT: andl 384(%edx), %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: andl (%edx), %eax
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 256(%edx), %eax
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 260(%edx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 4(%edx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 132(%edx), %eax
-; X86-NEXT: andl 388(%edx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl %ebx, %esi
-; X86-NEXT: orl %edi, %esi
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $4064, %edx # imm = 0xFE0
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: movl (%eax,%edx), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
-; SSE-LABEL: test_ne_i4096:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rbp
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %r13
-; SSE-NEXT: pushq %r12
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: subq $1576, %rsp # imm = 0x628
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl %esi, %eax
-; SSE-NEXT: andl $4032, %eax # imm = 0xFC0
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0,
{{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp) -; SSE-NEXT: movq $1, {{[0-9]+}}(%rsp) -; SSE-NEXT: andl $63, %ecx -; SSE-NEXT: shrl $3, %eax -; SSE-NEXT: negl %eax -; SSE-NEXT: movslq %eax, %rsi -; SSE-NEXT: movq 1296(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1304(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1552(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1560(%rsp,%rsi), %rax -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1168(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1176(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1424(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1432(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1232(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, 
{{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1240(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1488(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1496(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1104(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1112(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1360(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, (%rsp) # 8-byte Spill -; SSE-NEXT: movq 1368(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1264(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1272(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1520(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1528(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1136(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1144(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1392(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1400(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1200(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1208(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1456(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1464(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1072(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1080(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1328(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1336(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: 
movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1280(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1288(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1536(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1544(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1152(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1160(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1408(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1416(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1216(%rsp,%rsi), %r11 -; SSE-NEXT: movq 1224(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %r11, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1472(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1480(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1088(%rsp,%rsi), %r9 -; SSE-NEXT: movq 1096(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %r9, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1344(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1352(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1248(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1256(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rax, %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1504(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1512(%rsp,%rsi), %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rdx, %rax -; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1120(%rsp,%rsi), %rax -; SSE-NEXT: movq 1128(%rsp,%rsi), %r8 -; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: shldq %cl, %rax, %r8 -; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1376(%rsp,%rsi), %r13 -; SSE-NEXT: movq 1384(%rsp,%rsi), %rbx -; SSE-NEXT: movq %rbx, %r8 -; SSE-NEXT: shldq %cl, %r13, %r8 -; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1184(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: 
movq 1192(%rsp,%rsi), %r15 -; SSE-NEXT: movq %r15, %r14 -; SSE-NEXT: shldq %cl, %rdx, %r14 -; SSE-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1440(%rsp,%rsi), %r10 -; SSE-NEXT: movq 1448(%rsp,%rsi), %rdx -; SSE-NEXT: movq %rdx, %r14 -; SSE-NEXT: shldq %cl, %r10, %r14 -; SSE-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1312(%rsp,%rsi), %r14 -; SSE-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq 1320(%rsp,%rsi), %rbp -; SSE-NEXT: movq %rbp, %r12 -; SSE-NEXT: shldq %cl, %r14, %r12 -; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, (%rsp) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq 1064(%rsp,%rsi), %rbx -; SSE-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; SSE-NEXT: shldq %cl, %rbp, %r14 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: shldq %cl, %rdx, %r11 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, %rdx -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, %r9 -; SSE-NEXT: movq 
%r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, %rbp -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, %r9 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r15, %r13 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r12, %r15 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload -; SSE-NEXT: shldq %cl, %r12, %r10 -; SSE-NEXT: andq 384(%rdi), %r10 -; SSE-NEXT: andq 128(%rdi), %r15 -; SSE-NEXT: andq 320(%rdi), %r13 -; SSE-NEXT: andq 64(%rdi), %rax -; SSE-NEXT: orq %r10, %r15 -; SSE-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: orq %r13, %rax -; SSE-NEXT: andq 448(%rdi), %r9 -; SSE-NEXT: andq 192(%rdi), %rbp -; SSE-NEXT: orq %r9, %rbp -; SSE-NEXT: orq %rax, %rbp -; SSE-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: andq 288(%rdi), %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; SSE-NEXT: andq 32(%rdi), %r9 -; SSE-NEXT: andq 416(%rdi), %rdx -; SSE-NEXT: andq 160(%rdi), %r11 -; SSE-NEXT: orq %r8, %r9 -; SSE-NEXT: orq %rdx, %r11 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: andq 352(%rdi), %rdx -; SSE-NEXT: orq %r9, %r11 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 96(%rdi), %rax -; SSE-NEXT: orq %rdx, %rax -; SSE-NEXT: movq %rax, %rdx -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 480(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 224(%rdi), %r8 -; SSE-NEXT: orq %rax, %r8 -; SSE-NEXT: orq %rdx, %r8 -; SSE-NEXT: andq 272(%rdi), %r14 -; SSE-NEXT: orq %r11, %r8 -; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 16(%rdi), %rax -; SSE-NEXT: orq %r14, %rax -; SSE-NEXT: movq %rax, %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: andq 400(%rdi), %rdx -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 144(%rdi), %rax -; SSE-NEXT: orq %rdx, %rax -; SSE-NEXT: orq %r8, %rax -; SSE-NEXT: movq %rax, %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; SSE-NEXT: andq 336(%rdi), %r9 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 80(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: andq 464(%rdi), %rdx -; SSE-NEXT: orq %r9, %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; SSE-NEXT: andq 208(%rdi), %r11 -; SSE-NEXT: orq %rdx, %r11 -; SSE-NEXT: orq %rax, %r11 -; SSE-NEXT: orq %r8, %r11 -; SSE-NEXT: movq (%rsp), %rdx # 8-byte Reload -; SSE-NEXT: andq 304(%rdi), %rdx -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 48(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; SSE-NEXT: andq 
432(%rdi), %r9 -; SSE-NEXT: orq %rdx, %rax -; SSE-NEXT: movq %rax, %r10 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 176(%rdi), %r8 -; SSE-NEXT: orq %r9, %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; SSE-NEXT: andq 368(%rdi), %r9 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 112(%rdi), %rax -; SSE-NEXT: orq %r10, %r8 -; SSE-NEXT: movq %r8, %r10 -; SSE-NEXT: orq %r9, %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 496(%rdi), %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; SSE-NEXT: andq 240(%rdi), %rbp -; SSE-NEXT: orq %r8, %rbp -; SSE-NEXT: orq %rax, %rbp -; SSE-NEXT: orq %r10, %rbp -; SSE-NEXT: orq %r11, %rbp -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 392(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload -; SSE-NEXT: andq 136(%rdi), %r12 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: andq 328(%rdi), %rdx -; SSE-NEXT: orq %rax, %r12 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 72(%rdi), %rax -; SSE-NEXT: orq %rdx, %rax -; SSE-NEXT: movq %rax, %rdx -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 456(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; SSE-NEXT: andq 200(%rdi), %r13 -; SSE-NEXT: orq %rax, %r13 -; SSE-NEXT: orq %rdx, %r13 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: andq 296(%rdi), %rdx -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 40(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 424(%rdi), %r8 -; SSE-NEXT: orq %rdx, %rax -; SSE-NEXT: movq %rax, %r9 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; SSE-NEXT: andq 168(%rdi), %rdx -; SSE-NEXT: orq %r8, %rdx -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 360(%rdi), %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 104(%rdi), %rax -; SSE-NEXT: orq %r9, %rdx -; SSE-NEXT: orq %r8, %rax -; SSE-NEXT: movq %rax, %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 488(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; SSE-NEXT: andq 232(%rdi), %r15 -; SSE-NEXT: orq %rax, %r15 -; SSE-NEXT: orq %r8, %r15 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 280(%rdi), %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 24(%rdi), %rax -; SSE-NEXT: orq %rdx, %r15 -; SSE-NEXT: orq %r8, %rax -; SSE-NEXT: movq %rax, %r10 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 408(%rdi), %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 152(%rdi), %rax -; SSE-NEXT: orq %r8, %rax -; SSE-NEXT: orq %r10, %rax -; SSE-NEXT: movq %rax, %r10 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; SSE-NEXT: andq 344(%rdi), %r11 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; SSE-NEXT: andq 88(%rdi), %r8 -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; SSE-NEXT: andq 472(%rdi), %rax -; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; SSE-NEXT: andq 216(%rdi), %r14 -; SSE-NEXT: orq %r11, %r8 -; SSE-NEXT: orq %rax, %r14 -; SSE-NEXT: orq %r8, %r14 
-; SSE-NEXT: orq %r10, %r14
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE-NEXT: andq 312(%rdi), %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; SSE-NEXT: andq 56(%rdi), %r10
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 440(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE-NEXT: andq 184(%rdi), %r9
-; SSE-NEXT: orq %r11, %r10
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: orq %r10, %r9
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: shldq %cl, %rax, %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; SSE-NEXT: andq 376(%rdi), %r10
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 120(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE-NEXT: andq 504(%rdi), %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 248(%rdi), %r8
-; SSE-NEXT: orq %r10, %rax
-; SSE-NEXT: movq %rax, %r10
-; SSE-NEXT: orq %r11, %r8
-; SSE-NEXT: movq 1056(%rsp,%rsi), %rax
-; SSE-NEXT: shldq %cl, %rax, %rbx
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rax
-; SSE-NEXT: orq %r10, %r8
-; SSE-NEXT: orq %r9, %r8
-; SSE-NEXT: andq 256(%rdi), %rdx
-; SSE-NEXT: orq %r14, %r8
-; SSE-NEXT: andq (%rdi), %rax
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE-NEXT: orq %rbp, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE-NEXT: andq 264(%rdi), %rcx
-; SSE-NEXT: andq 8(%rdi), %rbx
-; SSE-NEXT: orq %rcx, %rbx
-; SSE-NEXT: orq %r12, %rbx
-; SSE-NEXT: orq %r13, %rbx
-; SSE-NEXT: orq %r15, %rbx
-; SSE-NEXT: orq %r8, %rbx
-; SSE-NEXT: orq %rax, %rbx
-; SSE-NEXT: setne %al
-; SSE-NEXT: addq $1576, %rsp # imm = 0x628
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r12
-; SSE-NEXT: popq %r13
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: popq %rbp
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: test_ne_i4096:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $1560, %rsp # imm = 0x618
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: movl %esi, %eax
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-;
AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: andl $4032, %eax # imm = 0xFC0 -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0] -; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX2-NEXT: andl $63, %ecx -; AVX2-NEXT: shrl $3, %eax -; AVX2-NEXT: negl %eax -; AVX2-NEXT: movslq %eax, %rsi -; AVX2-NEXT: movq 1280(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1288(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1536(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1544(%rsp,%rsi), %rax -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1152(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1160(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1408(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1416(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1216(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, (%rsp) # 8-byte Spill -; AVX2-NEXT: movq 1224(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1472(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1480(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1088(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1096(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1344(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1352(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1248(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1256(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1504(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; 
AVX2-NEXT: movq 1512(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1120(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1128(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1376(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1384(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1184(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1192(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1440(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1448(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1056(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1064(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1312(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1320(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1264(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1272(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1520(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1528(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1136(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1144(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1392(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1400(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1200(%rsp,%rsi), %r11 -; AVX2-NEXT: movq 1208(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %r11, %rax -; AVX2-NEXT: movq %rax, 
{{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1456(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1464(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1072(%rsp,%rsi), %r12 -; AVX2-NEXT: movq 1080(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %r12, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1328(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1336(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rdx, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1232(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1240(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rax, %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1488(%rsp,%rsi), %rbp -; AVX2-NEXT: movq 1496(%rsp,%rsi), %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rbp, %rax -; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1104(%rsp,%rsi), %rax -; AVX2-NEXT: movq 1112(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: shldq %cl, %rax, %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1360(%rsp,%rsi), %r10 -; AVX2-NEXT: movq 1368(%rsp,%rsi), %r8 -; AVX2-NEXT: movq %r8, %rdx -; AVX2-NEXT: shldq %cl, %r10, %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1168(%rsp,%rsi), %r9 -; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1176(%rsp,%rsi), %rbx -; AVX2-NEXT: movq %rbx, %rdx -; AVX2-NEXT: shldq %cl, %r9, %rdx -; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1424(%rsp,%rsi), %r9 -; AVX2-NEXT: movq 1432(%rsp,%rsi), %rdx -; AVX2-NEXT: movq %rdx, %r14 -; AVX2-NEXT: shldq %cl, %r9, %r14 -; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1296(%rsp,%rsi), %r15 -; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq 1304(%rsp,%rsi), %r14 -; AVX2-NEXT: movq %r14, %r13 -; AVX2-NEXT: shldq %cl, %r15, %r13 -; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, (%rsp) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; 
AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq 1048(%rsp,%rsi), %rdx -; AVX2-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: shldq %cl, %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %rbx -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %r11 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %r12 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %r13 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %rbp -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r14, %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, %r14 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r15, %r9 -; AVX2-NEXT: andq 384(%rdi), %r9 -; AVX2-NEXT: andq 128(%rdi), %r14 -; AVX2-NEXT: andq 320(%rdi), %r10 -; AVX2-NEXT: orq %r9, %r14 -; AVX2-NEXT: movq %r14, %r15 -; AVX2-NEXT: andq 64(%rdi), %rax -; AVX2-NEXT: orq %r10, %rax -; AVX2-NEXT: andq 448(%rdi), %rbp -; AVX2-NEXT: andq 192(%rdi), %r13 -; AVX2-NEXT: orq %rbp, %r13 -; AVX2-NEXT: orq %rax, %r13 -; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: andq 288(%rdi), %r8 -; AVX2-NEXT: andq 32(%rdi), %r12 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte 
Reload -; AVX2-NEXT: andq 416(%rdi), %rax -; AVX2-NEXT: orq %r8, %r12 -; AVX2-NEXT: andq 160(%rdi), %r11 -; AVX2-NEXT: orq %rax, %r11 -; AVX2-NEXT: andq 352(%rdi), %rbx -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 96(%rdi), %rax -; AVX2-NEXT: orq %r12, %r11 -; AVX2-NEXT: orq %rbx, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX2-NEXT: andq 480(%rdi), %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX2-NEXT: andq 224(%rdi), %r13 -; AVX2-NEXT: orq %r10, %r13 -; AVX2-NEXT: orq %rax, %r13 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 272(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 16(%rdi), %rax -; AVX2-NEXT: orq %r11, %r13 -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: movq %rax, %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX2-NEXT: andq 400(%rdi), %r9 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 144(%rdi), %rax -; AVX2-NEXT: orq %r9, %rax -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: movq %rax, %r9 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX2-NEXT: andq 336(%rdi), %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 80(%rdi), %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 464(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; AVX2-NEXT: andq 208(%rdi), %r11 -; AVX2-NEXT: orq %r10, %rax -; AVX2-NEXT: orq %r8, %r11 -; AVX2-NEXT: orq %rax, %r11 -; AVX2-NEXT: orq %r9, %r11 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX2-NEXT: andq 304(%rdi), %r9 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 48(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX2-NEXT: andq 432(%rdi), %r10 -; AVX2-NEXT: movq (%rsp), %rax # 8-byte Reload -; AVX2-NEXT: andq 176(%rdi), %rax -; AVX2-NEXT: orq %r9, %r8 -; AVX2-NEXT: movq %r8, %r9 -; AVX2-NEXT: orq %r10, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 368(%rdi), %r8 -; AVX2-NEXT: orq %r9, %rax -; AVX2-NEXT: movq %rax, %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 112(%rdi), %rax -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 496(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX2-NEXT: andq 240(%rdi), %r9 -; AVX2-NEXT: orq %r8, %r9 -; AVX2-NEXT: orq %rax, %r9 -; AVX2-NEXT: orq %r10, %r9 -; AVX2-NEXT: orq %r11, %r9 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX2-NEXT: andq 392(%rdi), %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; AVX2-NEXT: andq 136(%rdi), %rbp -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 328(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 72(%rdi), %rax -; AVX2-NEXT: orq %r10, %rbp -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: movq %rax, %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 456(%rdi), %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload -; AVX2-NEXT: andq 200(%rdi), %r12 -; AVX2-NEXT: orq %rax, %r12 -; AVX2-NEXT: orq %r8, %r12 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; 
AVX2-NEXT: andq 296(%rdi), %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 40(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; AVX2-NEXT: andq 424(%rdi), %r11 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 168(%rdi), %rax -; AVX2-NEXT: orq %r10, %r8 -; AVX2-NEXT: movq %r8, %r10 -; AVX2-NEXT: orq %r11, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 360(%rdi), %r8 -; AVX2-NEXT: orq %r10, %rax -; AVX2-NEXT: movq %rax, %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 104(%rdi), %rax -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: movq %rax, %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 488(%rdi), %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX2-NEXT: andq 232(%rdi), %r14 -; AVX2-NEXT: orq %rax, %r14 -; AVX2-NEXT: orq %r8, %r14 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 280(%rdi), %r8 -; AVX2-NEXT: orq %r10, %r14 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 24(%rdi), %rax -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: movq %rax, %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 408(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 152(%rdi), %rax -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: orq %r10, %rax -; AVX2-NEXT: movq %rax, %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; AVX2-NEXT: andq 344(%rdi), %r11 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 88(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 472(%rdi), %rax -; AVX2-NEXT: orq %r11, %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX2-NEXT: andq 216(%rdi), %rbx -; AVX2-NEXT: orq %rax, %rbx -; AVX2-NEXT: orq %r8, %rbx -; AVX2-NEXT: orq %r10, %rbx -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 312(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 56(%rdi), %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX2-NEXT: andq 440(%rdi), %r10 -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: movq %rax, %r11 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 184(%rdi), %r8 -; AVX2-NEXT: orq %r10, %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX2-NEXT: andq 376(%rdi), %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 120(%rdi), %rax -; AVX2-NEXT: orq %r11, %r8 -; AVX2-NEXT: movq %r8, %r11 -; AVX2-NEXT: orq %r10, %rax -; AVX2-NEXT: movq %rax, %r10 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: andq 504(%rdi), %r8 -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andq 248(%rdi), %rax -; AVX2-NEXT: orq %r8, %rax -; AVX2-NEXT: orq %r10, %rax -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX2-NEXT: shldq %cl, %r8, %r10 -; AVX2-NEXT: orq %r11, %rax -; AVX2-NEXT: movq 1040(%rsp,%rsi), %rsi -; AVX2-NEXT: orq %rbx, %rax -; AVX2-NEXT: movq %rax, %r8 -; AVX2-NEXT: shlxq %rcx, %rsi, %rax -; AVX2-NEXT: andq 256(%rdi), %r10 -; AVX2-NEXT: andq (%rdi), %rax -; AVX2-NEXT: 
orq %r10, %rax -; AVX2-NEXT: orq %r15, %rax -; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload -; AVX2-NEXT: orq %r13, %rax -; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx -; AVX2-NEXT: shldq %cl, %rsi, %rdx -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload -; AVX2-NEXT: andq 264(%rdi), %rcx -; AVX2-NEXT: andq 8(%rdi), %rdx -; AVX2-NEXT: orq %r9, %rax -; AVX2-NEXT: orq %rcx, %rdx -; AVX2-NEXT: orq %rbp, %rdx -; AVX2-NEXT: orq %r12, %rdx -; AVX2-NEXT: orq %r14, %rdx -; AVX2-NEXT: orq %r8, %rdx -; AVX2-NEXT: orq %rax, %rdx -; AVX2-NEXT: setne %al -; AVX2-NEXT: addq $1560, %rsp # imm = 0x618 -; AVX2-NEXT: popq %rbx -; AVX2-NEXT: popq %r12 -; AVX2-NEXT: popq %r13 -; AVX2-NEXT: popq %r14 -; AVX2-NEXT: popq %r15 -; AVX2-NEXT: popq %rbp -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512-LABEL: test_ne_i4096: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %rbp -; AVX512-NEXT: pushq %r15 -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %r13 -; AVX512-NEXT: pushq %r12 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: subq $1560, %rsp # imm = 0x618 -; AVX512-NEXT: movl %esi, %ecx -; AVX512-NEXT: movl %esi, %eax -; AVX512-NEXT: andl $4032, %eax # imm = 0xFC0 -; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0] -; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: andl $63, %ecx -; AVX512-NEXT: shrl $3, %eax -; AVX512-NEXT: negl %eax -; AVX512-NEXT: movslq %eax, %rsi -; AVX512-NEXT: movq 1280(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1288(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1536(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1544(%rsp,%rsi), %rax -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 
1152(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1160(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1408(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1416(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1216(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, (%rsp) # 8-byte Spill -; AVX512-NEXT: movq 1224(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1472(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1480(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1088(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1096(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1344(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1352(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1248(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1256(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1504(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1512(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1120(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1128(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1376(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1384(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1184(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1192(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: 
movq 1440(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1448(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1056(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1064(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1312(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1320(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1264(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1272(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1520(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1528(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1136(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1144(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1392(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1400(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1200(%rsp,%rsi), %r10 -; AVX512-NEXT: movq 1208(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %r10, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1456(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1464(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1072(%rsp,%rsi), %r14 -; AVX512-NEXT: movq 1080(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %r14, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1328(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1336(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rdx, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1232(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: 
movq 1240(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rax, %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1488(%rsp,%rsi), %r12 -; AVX512-NEXT: movq 1496(%rsp,%rsi), %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %r12, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1104(%rsp,%rsi), %rax -; AVX512-NEXT: movq 1112(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq %cl, %rax, %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1360(%rsp,%rsi), %r11 -; AVX512-NEXT: movq 1368(%rsp,%rsi), %rbx -; AVX512-NEXT: movq %rbx, %rdx -; AVX512-NEXT: shldq %cl, %r11, %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1168(%rsp,%rsi), %r9 -; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1176(%rsp,%rsi), %r8 -; AVX512-NEXT: movq %r8, %rdx -; AVX512-NEXT: shldq %cl, %r9, %rdx -; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1424(%rsp,%rsi), %r9 -; AVX512-NEXT: movq 1432(%rsp,%rsi), %rdx -; AVX512-NEXT: movq %rdx, %r15 -; AVX512-NEXT: shldq %cl, %r9, %r15 -; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1296(%rsp,%rsi), %rbp -; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq 1304(%rsp,%rsi), %r15 -; AVX512-NEXT: movq %r15, %r13 -; AVX512-NEXT: shldq %cl, %rbp, %r13 -; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, (%rsp) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: shldq %cl, %rdx, 
{{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq 1048(%rsp,%rsi), %rdx -; AVX512-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %rbx -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %r14 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %r13 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %r12 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %r15, %r11 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; AVX512-NEXT: shldq %cl, %rbp, %r15 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; AVX512-NEXT: shldq %cl, %rbp, %r9 -; AVX512-NEXT: andq 384(%rdi), %r9 -; AVX512-NEXT: andq 128(%rdi), %r15 -; AVX512-NEXT: orq %r9, %r15 -; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: andq 320(%rdi), %r11 -; AVX512-NEXT: andq 64(%rdi), %rax -; AVX512-NEXT: orq %r11, %rax -; AVX512-NEXT: andq 448(%rdi), %r12 -; AVX512-NEXT: andq 192(%rdi), %r13 -; AVX512-NEXT: orq %r12, %r13 -; AVX512-NEXT: orq %rax, %r13 -; AVX512-NEXT: andq 288(%rdi), %r8 -; AVX512-NEXT: andq 32(%rdi), %r14 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 416(%rdi), %rax -; AVX512-NEXT: orq %r8, %r14 -; AVX512-NEXT: andq 160(%rdi), %r10 -; AVX512-NEXT: orq %rax, %r10 -; AVX512-NEXT: andq 352(%rdi), %rbx -; AVX512-NEXT: orq %r14, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 96(%rdi), %rax -; AVX512-NEXT: orq %rbx, %rax -; AVX512-NEXT: movq %rax, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 480(%rdi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: andq 224(%rdi), %r15 -; AVX512-NEXT: orq %rax, %r15 -; AVX512-NEXT: orq %r8, %r15 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 272(%rdi), %r8 -; AVX512-NEXT: orq %r10, %r15 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 
8-byte Reload -; AVX512-NEXT: andq 16(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq %rax, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX512-NEXT: andq 400(%rdi), %r9 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 144(%rdi), %rax -; AVX512-NEXT: orq %r9, %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq %rax, %r9 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX512-NEXT: andq 336(%rdi), %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 80(%rdi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 464(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; AVX512-NEXT: andq 208(%rdi), %r11 -; AVX512-NEXT: orq %r10, %rax -; AVX512-NEXT: orq %r8, %r11 -; AVX512-NEXT: orq %rax, %r11 -; AVX512-NEXT: orq %r9, %r11 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX512-NEXT: andq 304(%rdi), %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 48(%rdi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX512-NEXT: andq 432(%rdi), %r9 -; AVX512-NEXT: movq (%rsp), %r8 # 8-byte Reload -; AVX512-NEXT: andq 176(%rdi), %r8 -; AVX512-NEXT: orq %r10, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: orq %r9, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX512-NEXT: andq 368(%rdi), %r9 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 112(%rdi), %rax -; AVX512-NEXT: orq %r10, %r8 -; AVX512-NEXT: movq %r8, %r10 -; AVX512-NEXT: orq %r9, %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 496(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload -; AVX512-NEXT: andq 240(%rdi), %r9 -; AVX512-NEXT: orq %r8, %r9 -; AVX512-NEXT: orq %rax, %r9 -; AVX512-NEXT: orq %r10, %r9 -; AVX512-NEXT: orq %r11, %r9 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX512-NEXT: andq 392(%rdi), %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload -; AVX512-NEXT: andq 136(%rdi), %rbp -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 328(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 72(%rdi), %rax -; AVX512-NEXT: orq %r10, %rbp -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq %rax, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 456(%rdi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload -; AVX512-NEXT: andq 200(%rdi), %r12 -; AVX512-NEXT: orq %rax, %r12 -; AVX512-NEXT: orq %r8, %r12 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 296(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 40(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 424(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 168(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: orq %r10, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 360(%rdi), %r8 -; AVX512-NEXT: movq 
{{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 104(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq %rax, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 488(%rdi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload -; AVX512-NEXT: andq 232(%rdi), %r14 -; AVX512-NEXT: orq %rax, %r14 -; AVX512-NEXT: orq %r8, %r14 -; AVX512-NEXT: orq %r10, %r14 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 280(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 24(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 408(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 152(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: orq %r10, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload -; AVX512-NEXT: andq 344(%rdi), %r11 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 88(%rdi), %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 472(%rdi), %rax -; AVX512-NEXT: orq %r11, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload -; AVX512-NEXT: andq 216(%rdi), %rbx -; AVX512-NEXT: orq %rax, %rbx -; AVX512-NEXT: orq %r8, %rbx -; AVX512-NEXT: orq %r10, %rbx -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX512-NEXT: andq 312(%rdi), %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 56(%rdi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 440(%rdi), %r8 -; AVX512-NEXT: orq %r10, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 184(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 376(%rdi), %r8 -; AVX512-NEXT: orq %r10, %rax -; AVX512-NEXT: movq %rax, %r11 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 120(%rdi), %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: movq %rax, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 504(%rdi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: andq 248(%rdi), %r8 -; AVX512-NEXT: orq %rax, %r8 -; AVX512-NEXT: orq %r10, %r8 -; AVX512-NEXT: orq %r11, %r8 -; AVX512-NEXT: movq 1040(%rsp,%rsi), %rax -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX512-NEXT: shldq %cl, %rsi, %r10 -; AVX512-NEXT: orq %rbx, %r8 -; AVX512-NEXT: shlxq %rcx, %rax, %rsi -; AVX512-NEXT: andq 256(%rdi), %r10 -; AVX512-NEXT: andq (%rdi), %rsi -; AVX512-NEXT: orq %r10, %rsi -; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload -; AVX512-NEXT: orq %r13, %rsi -; AVX512-NEXT: orq %r15, %rsi -; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx -; AVX512-NEXT: shldq %cl, %rax, %rdx -; AVX512-NEXT: orq %r9, %rsi -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: andq 264(%rdi), %rax -; AVX512-NEXT: andq 8(%rdi), %rdx -; AVX512-NEXT: orq %rax, %rdx -; AVX512-NEXT: orq %rbp, %rdx -; 
AVX512-NEXT: orq %r12, %rdx
-; AVX512-NEXT: orq %r14, %rdx
-; AVX512-NEXT: orq %r8, %rdx
-; AVX512-NEXT: orq %rsi, %rdx
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: addq $1560, %rsp # imm = 0x618
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: test_ne_i4096:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: andl $4064, %eax # imm = 0xFE0
+; X64-NEXT: shrl $3, %eax
+; X64-NEXT: movl (%rdi,%rax), %eax
+; X64-NEXT: btl %esi, %eax
+; X64-NEXT: setb %al
+; X64-NEXT: retq
 %rem = and i32 %position, 4095
 %ofs = zext nneg i32 %rem to i4096
 %bit = shl nuw i4096 1, %ofs
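
The new X64 sequence above answers the i4096 bit test with a single aligned load: andl $4064 (0xFE0) keeps bits 5-11 of the position, shrl $3 turns that into the byte offset of the 32-bit word containing the bit, and btl reads the selected bit using the position modulo 32. A minimal target-independent sketch of the same computation (an illustration, not part of the patch; the name @bittest_word is hypothetical):

define i1 @bittest_word(ptr %bits, i32 %position) {
  %rem = and i32 %position, 4095      ; bit index within the 4096-bit value
  %word = lshr i32 %rem, 5            ; index of the aligned 32-bit word
  %byteoff = shl i32 %word, 2         ; byte offset of that word
  %off = zext i32 %byteoff to i64
  %p = getelementptr i8, ptr %bits, i64 %off
  %w = load i32, ptr %p, align 4      ; the only memory access needed
  %sh = and i32 %rem, 31              ; btl implicitly uses position mod 32
  %mask = shl i32 1, %sh
  %and = and i32 %w, %mask
  %ne = icmp ne i32 %and, 0
  ret i1 %ne
}
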
diff --git a/llvm/test/CodeGen/X86/ldexp-avx512.ll b/llvm/test/CodeGen/X86/ldexp-avx512.ll
new file mode 100644
index 0000000..ea93a91
--- /dev/null
+++ b/llvm/test/CodeGen/X86/ldexp-avx512.ll
@@ -0,0 +1,467 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512VL
+
+define half @test_half(half %x, i32 %exp) nounwind {
+; CHECK-LABEL: test_half:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+entry:
+ %r = tail call fast half @llvm.ldexp.f16.i32(half %x, i32 %exp)
+ ret half %r
+}
+declare half @llvm.ldexp.f16.i32(half, i32) memory(none)
+
+define float @test_float(float %x, i32 %exp) nounwind {
+; CHECK-LABEL: test_float:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp ldexpf@PLT # TAILCALL
+entry:
+ %r = tail call fast float @ldexpf(float %x, i32 %exp)
+ ret float %r
+}
+declare float @ldexpf(float, i32) memory(none)
+
+define double @test_double(double %x, i32 %exp) nounwind {
+; CHECK-LABEL: test_double:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp ldexp@PLT # TAILCALL
+entry:
+ %r = tail call fast double @ldexp(double %x, i32 %exp)
+ ret double %r
+}
+declare double @ldexp(double, i32) memory(none)
+
+define fp128 @testExpl(fp128 %x, i32 %exp) nounwind {
+; CHECK-LABEL: testExpl:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp ldexpl@PLT # TAILCALL
+entry:
+ %r = tail call fast fp128 @ldexpl(fp128 %x, i32 %exp)
+ ret fp128 %r
+}
+declare fp128 @ldexpl(fp128, i32) memory(none)
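
The scalar cases show two behaviors: float, double and fp128 match the libc ldexpf/ldexp/ldexpl signatures exactly, so they fold to bare jmp tail calls, while half must be extended to float around the ldexpf call (vcvtph2ps/vcvtps2ph), which forces a real call frame. A minimal sketch of the generic intrinsic form that, as the checks above show, still lowers to the plain libcall even with -mattr=+avx512f (an illustration; the name @ldexp_intrinsic is hypothetical):

define double @ldexp_intrinsic(double %x, i32 %e) {
  ; on x86-64 this becomes a call (or tail call) to ldexp@PLT
  %r = tail call double @llvm.ldexp.f64.i32(double %x, i32 %e)
  ret double %r
}
declare double @llvm.ldexp.f64.i32(double, i32)
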
+
+define <4 x float> @test_ldexp_4xfloat(<4 x float> %x, <4 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_4xfloat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %r = call <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float> %x, <4 x i32> %exp)
+ ret <4 x float> %r
+}
+declare <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float>, <4 x i32>)
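
There is no vector libcall for ldexp, so test_ldexp_4xfloat above is fully scalarized: each lane is extracted into %edi, ldexpf is called, and the result is stitched back with vinsertps, with spills and reloads around each of the four calls. Roughly the form the backend expands to, written as unrolled IR (a sketch, not the actual SelectionDAG output; the name @ldexp_v4f32_scalarized is hypothetical):

define <4 x float> @ldexp_v4f32_scalarized(<4 x float> %x, <4 x i32> %e) {
  ; lane 0
  %x0 = extractelement <4 x float> %x, i64 0
  %e0 = extractelement <4 x i32> %e, i64 0
  %r0 = call float @ldexpf(float %x0, i32 %e0)
  %v0 = insertelement <4 x float> poison, float %r0, i64 0
  ; lane 1
  %x1 = extractelement <4 x float> %x, i64 1
  %e1 = extractelement <4 x i32> %e, i64 1
  %r1 = call float @ldexpf(float %x1, i32 %e1)
  %v1 = insertelement <4 x float> %v0, float %r1, i64 1
  ; lane 2
  %x2 = extractelement <4 x float> %x, i64 2
  %e2 = extractelement <4 x i32> %e, i64 2
  %r2 = call float @ldexpf(float %x2, i32 %e2)
  %v2 = insertelement <4 x float> %v1, float %r2, i64 2
  ; lane 3
  %x3 = extractelement <4 x float> %x, i64 3
  %e3 = extractelement <4 x i32> %e, i64 3
  %r3 = call float @ldexpf(float %x3, i32 %e3)
  %v3 = insertelement <4 x float> %v2, float %r3, i64 3
  ret <4 x float> %v3
}
declare float @ldexpf(float, i32)
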
+
+define <2 x double> @test_ldexp_2xdouble(<2 x double> %x, <2 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_2xdouble:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %r = call <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double> %x, <2 x i32> %exp)
+ ret <2 x double> %r
+}
+declare <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double>, <2 x i32>)
+
+define <8 x float> @test_ldexp_8xfloat(<8 x float> %x, <8 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_8xfloat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $120, %rsp
+; CHECK-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1
+; CHECK-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: addq $120, %rsp
+; CHECK-NEXT: retq
+ %r = call <8 x float> @llvm.ldexp.v8f32.v8i32(<8 x float> %x, <8 x i32> %exp)
+ ret <8 x float> %r
+}
+declare <8 x float> @llvm.ldexp.v8f32.v8i32(<8 x float>, <8 x i32>)
+
+define <4 x double> @test_ldexp_4xdouble(<4 x double> %x, <4 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_4xdouble:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $88, %rsp
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vextractps $2, %xmm1, %edi
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: addq $88, %rsp
+; CHECK-NEXT: retq
+ %r = call <4 x double> @llvm.ldexp.v4f64.v4i32(<4 x double> %x, <4 x i32> %exp)
+ ret <4 x double> %r
+}
+declare <4 x double> @llvm.ldexp.v4f64.v4i32(<4 x double>, <4 x i32>)
+
+define <16 x float> @test_ldexp_16xfloat(<16 x float> %x, <16 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_16xfloat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $216, %rsp
+; CHECK-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextracti32x4 $3, %zmm1, %xmm1
+; CHECK-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; CHECK-NEXT: addq $216, %rsp
+; CHECK-NEXT: retq
+ %r = call <16 x float> @llvm.ldexp.v16f32.v16i32(<16 x float> %x, <16 x i32> %exp)
+ ret <16 x float> %r
+}
+declare <16 x float> @llvm.ldexp.v16f32.v16i32(<16 x float>, <16 x i32>)
+
+define <8 x double> @test_ldexp_8xdouble(<8 x double> %x, <8 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_8xdouble:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $184, %rsp
+; CHECK-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextractps $2, %xmm1, %edi
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; CHECK-NEXT: addq $184, %rsp
+; CHECK-NEXT: retq
+ %r = call <8 x double> @llvm.ldexp.v8f64.v8i32(<8 x double> %x, <8 x i32> %exp)
+ ret <8 x double> %r
+}
+declare <8 x double> @llvm.ldexp.v8f64.v8i32(<8 x double>, <8 x i32>)
+
+;; NOTE: These prefixes are unused and the list is autogenerated.
Do not add tests below this line: +; AVX512: {{.*}} +; AVX512VL: {{.*}} diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll index ab1feba..044327d 100644 --- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll +++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll @@ -875,28 +875,12 @@ define i1 @mask_v8i32(<8 x i32> %a0) { ; SSE41-NEXT: sete %al ; SSE41-NEXT: retq ; -; AVX1-LABEL: mask_v8i32: -; AVX1: # %bb.0: -; AVX1-NEXT: vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0 -; AVX1-NEXT: sete %al -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: mask_v8i32: -; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456] -; AVX2-NEXT: vptest %ymm1, %ymm0 -; AVX2-NEXT: sete %al -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512-LABEL: mask_v8i32: -; AVX512: # %bb.0: -; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456] -; AVX512-NEXT: vptest %ymm1, %ymm0 -; AVX512-NEXT: sete %al -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; AVX-LABEL: mask_v8i32: +; AVX: # %bb.0: +; AVX-NEXT: vtestps %ymm0, %ymm0 +; AVX-NEXT: sete %al +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq %1 = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %a0) %2 = and i32 %1, 2147483648 %3 = icmp eq i32 %2, 0 @@ -965,33 +949,46 @@ define i1 @signtest_v8i32(<8 x i32> %a0) { ; SSE41-NEXT: sete %al ; SSE41-NEXT: retq ; -; AVX1-LABEL: signtest_v8i32: -; AVX1: # %bb.0: -; AVX1-NEXT: vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0 -; AVX1-NEXT: sete %al -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: signtest_v8i32: -; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456] -; AVX2-NEXT: vptest %ymm1, %ymm0 -; AVX2-NEXT: sete %al -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512-LABEL: signtest_v8i32: -; AVX512: # %bb.0: -; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456] -; AVX512-NEXT: vptest %ymm1, %ymm0 -; AVX512-NEXT: sete %al -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; AVX-LABEL: signtest_v8i32: +; AVX: # %bb.0: +; AVX-NEXT: vtestps %ymm0, %ymm0 +; AVX-NEXT: sete %al +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq %1 = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %a0) %2 = icmp sgt i32 %1, -1 ret i1 %2 } +define i1 @signtest_v4i64(<4 x i64> %a0) { +; SSE2-LABEL: signtest_v4i64: +; SSE2: # %bb.0: +; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; SSE2-NEXT: por %xmm0, %xmm1 +; SSE2-NEXT: movq %xmm1, %rax +; SSE2-NEXT: testq %rax, %rax +; SSE2-NEXT: setns %al +; SSE2-NEXT: retq +; +; SSE41-LABEL: signtest_v4i64: +; SSE41: # %bb.0: +; SSE41-NEXT: por %xmm1, %xmm0 +; SSE41-NEXT: ptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: sete %al +; SSE41-NEXT: retq +; +; AVX-LABEL: signtest_v4i64: +; AVX: # %bb.0: +; AVX-NEXT: vtestpd %ymm0, %ymm0 +; AVX-NEXT: sete %al +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq + %1 = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %a0) + %2 = icmp sgt i64 %1, -1 + ret i1 %2 +} + define i1 @trunc_v16i16(<16 x i16> %a0) { ; SSE2-LABEL: trunc_v16i16: ; SSE2: # %bb.0: @@ -1162,11 +1159,11 @@ define i32 @mask_v3i1(<3 x i32> %a, <3 x i32> %b) { ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: orl %ecx, %eax ; SSE2-NEXT: testb $1, %al -; SSE2-NEXT: je .LBB29_2 +; SSE2-NEXT: je .LBB30_2 ; 
SSE2-NEXT: # %bb.1: ; SSE2-NEXT: xorl %eax, %eax ; SSE2-NEXT: retq -; SSE2-NEXT: .LBB29_2: +; SSE2-NEXT: .LBB30_2: ; SSE2-NEXT: movl $1, %eax ; SSE2-NEXT: retq ; @@ -1181,11 +1178,11 @@ define i32 @mask_v3i1(<3 x i32> %a, <3 x i32> %b) { ; SSE41-NEXT: pextrd $2, %xmm1, %eax ; SSE41-NEXT: orl %ecx, %eax ; SSE41-NEXT: testb $1, %al -; SSE41-NEXT: je .LBB29_2 +; SSE41-NEXT: je .LBB30_2 ; SSE41-NEXT: # %bb.1: ; SSE41-NEXT: xorl %eax, %eax ; SSE41-NEXT: retq -; SSE41-NEXT: .LBB29_2: +; SSE41-NEXT: .LBB30_2: ; SSE41-NEXT: movl $1, %eax ; SSE41-NEXT: retq ; @@ -1200,11 +1197,11 @@ define i32 @mask_v3i1(<3 x i32> %a, <3 x i32> %b) { ; AVX1OR2-NEXT: vpextrd $2, %xmm0, %eax ; AVX1OR2-NEXT: orl %ecx, %eax ; AVX1OR2-NEXT: testb $1, %al -; AVX1OR2-NEXT: je .LBB29_2 +; AVX1OR2-NEXT: je .LBB30_2 ; AVX1OR2-NEXT: # %bb.1: ; AVX1OR2-NEXT: xorl %eax, %eax ; AVX1OR2-NEXT: retq -; AVX1OR2-NEXT: .LBB29_2: +; AVX1OR2-NEXT: .LBB30_2: ; AVX1OR2-NEXT: movl $1, %eax ; AVX1OR2-NEXT: retq ; @@ -1219,12 +1216,12 @@ define i32 @mask_v3i1(<3 x i32> %a, <3 x i32> %b) { ; AVX512F-NEXT: korw %k0, %k1, %k0 ; AVX512F-NEXT: kmovw %k0, %eax ; AVX512F-NEXT: testb $1, %al -; AVX512F-NEXT: je .LBB29_2 +; AVX512F-NEXT: je .LBB30_2 ; AVX512F-NEXT: # %bb.1: ; AVX512F-NEXT: xorl %eax, %eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq -; AVX512F-NEXT: .LBB29_2: +; AVX512F-NEXT: .LBB30_2: ; AVX512F-NEXT: movl $1, %eax ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq @@ -1240,12 +1237,12 @@ define i32 @mask_v3i1(<3 x i32> %a, <3 x i32> %b) { ; AVX512BW-NEXT: korw %k0, %k1, %k0 ; AVX512BW-NEXT: kmovd %k0, %eax ; AVX512BW-NEXT: testb $1, %al -; AVX512BW-NEXT: je .LBB29_2 +; AVX512BW-NEXT: je .LBB30_2 ; AVX512BW-NEXT: # %bb.1: ; AVX512BW-NEXT: xorl %eax, %eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq -; AVX512BW-NEXT: .LBB29_2: +; AVX512BW-NEXT: .LBB30_2: ; AVX512BW-NEXT: movl $1, %eax ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq @@ -1259,11 +1256,11 @@ define i32 @mask_v3i1(<3 x i32> %a, <3 x i32> %b) { ; AVX512BWVL-NEXT: korw %k0, %k1, %k0 ; AVX512BWVL-NEXT: kmovd %k0, %eax ; AVX512BWVL-NEXT: testb $1, %al -; AVX512BWVL-NEXT: je .LBB29_2 +; AVX512BWVL-NEXT: je .LBB30_2 ; AVX512BWVL-NEXT: # %bb.1: ; AVX512BWVL-NEXT: xorl %eax, %eax ; AVX512BWVL-NEXT: retq -; AVX512BWVL-NEXT: .LBB29_2: +; AVX512BWVL-NEXT: .LBB30_2: ; AVX512BWVL-NEXT: movl $1, %eax ; AVX512BWVL-NEXT: retq %1 = icmp ne <3 x i32> %a, %b diff --git a/llvm/test/DebugInfo/Generic/objc-property.ll b/llvm/test/DebugInfo/Generic/objc-property.ll new file mode 100644 index 0000000..007d1fe --- /dev/null +++ b/llvm/test/DebugInfo/Generic/objc-property.ll @@ -0,0 +1,88 @@ +; UNSUPPORTED: target={{.*}}-aix{{.*}} +; +; RUN: llc -filetype=obj -o - %s | llvm-dwarfdump --debug-info - | FileCheck %s + +; CHECK: DW_TAG_structure_type +; CHECK: DW_AT_name ("Foo") +; +; CHECK: DW_TAG_APPLE_property +; CHECK: DW_AT_APPLE_property_name ("autoSynthProp") +; CHECK: DW_AT_APPLE_property_attribute +; CHECK-SAME: DW_APPLE_PROPERTY_assign, DW_APPLE_PROPERTY_readwrite, +; CHECK-SAME: DW_APPLE_PROPERTY_atomic, DW_APPLE_PROPERTY_unsafe_unretained +; +; CHECK: DW_TAG_APPLE_property +; CHECK: DW_AT_APPLE_property_name ("synthProp") +; CHECK: DW_AT_APPLE_property_attribute +; CHECK-SAME: DW_APPLE_PROPERTY_assign, DW_APPLE_PROPERTY_readwrite, +; CHECK-SAME: DW_APPLE_PROPERTY_atomic, DW_APPLE_PROPERTY_unsafe_unretained +; +; CHECK: DW_TAG_APPLE_property +; CHECK: DW_AT_APPLE_property_name ("customGetterProp") +; CHECK: DW_AT_APPLE_property_getter ("customGetter") +; CHECK: 
DW_AT_APPLE_property_attribute +; CHECK-SAME: DW_APPLE_PROPERTY_getter, DW_APPLE_PROPERTY_assign, DW_APPLE_PROPERTY_readwrite, +; CHECK-SAME: DW_APPLE_PROPERTY_atomic, DW_APPLE_PROPERTY_unsafe_unretained +; +; CHECK: DW_TAG_APPLE_property +; CHECK: DW_AT_APPLE_property_name ("customSetterProp") +; CHECK: DW_AT_APPLE_property_setter ("customSetter:") +; CHECK: DW_AT_APPLE_property_attribute +; CHECK-SAME: DW_APPLE_PROPERTY_assign, DW_APPLE_PROPERTY_readwrite, +; CHECK-SAME: DW_APPLE_PROPERTY_setter, DW_APPLE_PROPERTY_atomic, DW_APPLE_PROPERTY_unsafe_unretained +; +; CHECK: DW_TAG_APPLE_property +; CHECK: DW_AT_APPLE_property_name ("customAccessorsProp") +; CHECK: DW_AT_APPLE_property_getter ("customGetter") +; CHECK: DW_AT_APPLE_property_setter ("customSetter:") +; CHECK: DW_AT_APPLE_property_attribute +; CHECK-SAME: DW_APPLE_PROPERTY_getter, DW_APPLE_PROPERTY_assign, DW_APPLE_PROPERTY_readwrite, +; CHECK-SAME: DW_APPLE_PROPERTY_setter, DW_APPLE_PROPERTY_atomic, DW_APPLE_PROPERTY_unsafe_unretained +; +; FIXME: missing link between DW_TAG_member and the associated DW_TAG_APPLE_property +; CHECK: DW_TAG_member +; CHECK-NOT: DW_AT_APPLE_property +; CHECK: DW_TAG_member +; CHECK-NOT: DW_AT_APPLE_property +; CHECK: DW_TAG_member +; CHECK-NOT: DW_AT_APPLE_property +; CHECK: DW_TAG_member +; CHECK-NOT: DW_AT_APPLE_property + +!llvm.module.flags = !{!0, !1} +!llvm.dbg.cu = !{!2} + +!0 = !{i32 7, !"Dwarf Version", i32 5} +!1 = !{i32 2, !"Debug Info Version", i32 3} +!2 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !3, producer: "hand written", isOptimized: false, runtimeVersion: 2, emissionKind: FullDebug, retainedTypes: !4, splitDebugInlining: false, debugInfoForProfiling: true, nameTableKind: Apple) +!3 = !DIFile(filename: "main.m", directory: "/tmp") +!4 = !{!5} +!5 = !DICompositeType(tag: DW_TAG_structure_type, name: "Foo", scope: !3, file: !3, line: 1, size: 128, flags: DIFlagObjcClassComplete, elements: !6, runtimeLang: DW_LANG_ObjC) +!6 = !{!7, !9, !10, !11, !12, !13, !14, !15, !16, !17, !24, !27, !28, !29, !30, !31, !32} +!7 = !DIObjCProperty(name: "autoSynthProp", file: !3, line: 5, attributes: 2316, type: !8) +!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!9 = !DIObjCProperty(name: "synthProp", file: !3, line: 6, attributes: 2316, type: !8) +!10 = !DIObjCProperty(name: "customGetterProp", file: !3, line: 7, getter: "customGetter", attributes: 2318, type: !8) +!11 = !DIObjCProperty(name: "customSetterProp", file: !3, line: 8, setter: "customSetter:", attributes: 2444, type: !8) +!12 = !DIObjCProperty(name: "customAccessorsProp", file: !3, line: 9, setter: "customSetter:", getter: "customGetter", attributes: 2446, type: !8) +!13 = !DIDerivedType(tag: DW_TAG_member, name: "someBackingIvar", scope: !3, file: !3, line: 2, baseType: !8, size: 32, flags: DIFlagProtected, extraData: !9) +!14 = !DIDerivedType(tag: DW_TAG_member, name: "_autoSynthProp", scope: !3, file: !3, line: 5, baseType: !8, size: 32, flags: DIFlagPrivate, extraData: !7) +!15 = !DIDerivedType(tag: DW_TAG_member, name: "_customGetterProp", scope: !3, file: !3, line: 7, baseType: !8, size: 32, flags: DIFlagPrivate, extraData: !10) +!16 = !DIDerivedType(tag: DW_TAG_member, name: "_customSetterProp", scope: !3, file: !3, line: 8, baseType: !8, size: 32, flags: DIFlagPrivate, extraData: !11) +!17 = !DISubprogram(name: "-[Foo customGetter]", scope: !5, file: !3, line: 19, type: !18, scopeLine: 19, flags: DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!18 = !DISubroutineType(types: !19) +!19 = !{!8, 
!20, !21} +!20 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !5, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer) +!21 = !DIDerivedType(tag: DW_TAG_typedef, name: "SEL", file: !3, baseType: !22, flags: DIFlagArtificial) +!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 64) +!23 = !DICompositeType(tag: DW_TAG_structure_type, name: "objc_selector", file: !3, flags: DIFlagFwdDecl) +!24 = !DISubprogram(name: "-[Foo customSetter:]", scope: !5, file: !3, line: 23, type: !25, scopeLine: 23, flags: DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!25 = !DISubroutineType(types: !26) +!26 = !{null, !20, !21, !8} +!27 = !DISubprogram(name: "-[Foo synthProp]", scope: !5, file: !3, line: 17, type: !18, scopeLine: 17, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!28 = !DISubprogram(name: "-[Foo setSynthProp:]", scope: !5, file: !3, line: 17, type: !25, scopeLine: 17, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!29 = !DISubprogram(name: "-[Foo autoSynthProp]", scope: !5, file: !3, line: 5, type: !18, scopeLine: 5, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!30 = !DISubprogram(name: "-[Foo setAutoSynthProp:]", scope: !5, file: !3, line: 5, type: !25, scopeLine: 5, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!31 = !DISubprogram(name: "-[Foo setCustomGetterProp:]", scope: !5, file: !3, line: 7, type: !25, scopeLine: 7, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) +!32 = !DISubprogram(name: "-[Foo customSetterProp]", scope: !5, file: !3, line: 8, type: !18, scopeLine: 8, flags: DIFlagArtificial | DIFlagPrototyped, spFlags: DISPFlagLocalToUnit) + diff --git a/llvm/test/DebugInfo/X86/base-type-size.ll b/llvm/test/DebugInfo/X86/base-type-size.ll index 3a8dc37..2f0ff2f 100644 --- a/llvm/test/DebugInfo/X86/base-type-size.ll +++ b/llvm/test/DebugInfo/X86/base-type-size.ll @@ -11,7 +11,10 @@ ; CHECK: DW_TAG_base_type ; CHECK-NEXT: DW_AT_name ("DW_ATE_unsigned_1") ; CHECK-NEXT: DW_AT_encoding (DW_ATE_unsigned) +;; TODO: Should this type use bit_size? +; CHECK-NOT: DW_AT_bit_size ; CHECK-NEXT: DW_AT_byte_size (0x01) +; CHECK-NOT: DW_AT_bit_size target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/DebugInfo/bit-int-size.ll b/llvm/test/DebugInfo/bit-int-size.ll new file mode 100644 index 0000000..e28921d --- /dev/null +++ b/llvm/test/DebugInfo/bit-int-size.ll @@ -0,0 +1,38 @@ +; RUN: %llc_dwarf %s -filetype=obj -o - | llvm-dwarfdump - | FileCheck %s +; REQUIRES: object-emission + +;; Check that base types with bit-sizes that don't fully fit within a byte +;; multiple get both a byte_size and a bit_size attribute.
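+;;
+;; For reference, C source along these lines (a hypothetical reconstruction
+;; from the !DIBasicType metadata below, where size/dataSize are 32/17 bits
+;; for "a" and 8/2 bits for "b") would give rise to the two base types
+;; checked here:
+;;
+;;   unsigned _BitInt(17) a; // 17 data bits stored in 4 bytes
+;;   _BitInt(2) b;           // 2 data bits stored in 1 byte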
+ +; CHECK: DW_TAG_base_type +; CHECK-NEXT: DW_AT_name ("unsigned _BitInt") +; CHECK-NEXT: DW_AT_encoding (DW_ATE_unsigned) +; CHECK-NEXT: DW_AT_byte_size (0x04) +; CHECK-NEXT: DW_AT_bit_size (0x11) + +; CHECK: DW_TAG_base_type +; CHECK-NEXT: DW_AT_name ("_BitInt") +; CHECK-NEXT: DW_AT_encoding (DW_ATE_signed) +; CHECK-NEXT: DW_AT_byte_size (0x01) +; CHECK-NEXT: DW_AT_bit_size (0x02) + +@a = global i8 0, align 1, !dbg !0 +@b = global i8 0, align 1, !dbg !5 + +!llvm.dbg.cu = !{!2} +!llvm.module.flags = !{!10, !11} +!llvm.ident = !{!12} + +!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression()) +!1 = distinct !DIGlobalVariable(name: "a", scope: !2, file: !7, line: 4, type: !9, isLocal: false, isDefinition: true) +!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !3, producer: "clang version 22.0.0git", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, nameTableKind: None) +!3 = !DIFile(filename: "bit-int.c", directory: "/") +!4 = !{!0, !5} +!5 = !DIGlobalVariableExpression(var: !6, expr: !DIExpression()) +!6 = distinct !DIGlobalVariable(name: "b", scope: !2, file: !7, line: 5, type: !8, isLocal: false, isDefinition: true) +!7 = !DIFile(filename: "bit-int.c", directory: "/") +!8 = !DIBasicType(name: "_BitInt", size: 8, dataSize: 2, encoding: DW_ATE_signed) +!9 = !DIBasicType(name: "unsigned _BitInt", size: 32, dataSize: 17, encoding: DW_ATE_unsigned) +!10 = !{i32 2, !"Debug Info Version", i32 3} +!11 = !{i32 1, !"wchar_size", i32 4} +!12 = !{!"clang version 22.0.0git"} diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s index d85ea79..399a644 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s @@ -1,8 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --unique --version 5 // RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding -comment-column=0 %s | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-ASM %s -// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | sed -n 's#.*\(\[0x[0-9a-fx,]\{1,\}\]\)#\1#p' | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-DIS %s +// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | %extract-encodings | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-DIS %s // RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding -comment-column=0 %s | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-ASM %s -// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | sed -n 's#.*\(\[0x[0-9a-fx,]\{1,\}\]\)#\1#p' | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-DIS %s +// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | %extract-encodings | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace 
--check-prefixes=GFX12,GFX12-DIS %s v_bfrev_b32_e32 v5, v1 // GFX12: v_bfrev_b32_e32 v5, v1 ; encoding: [0x01,0x71,0x0a,0x7e] diff --git a/llvm/test/Transforms/DFAJumpThreading/max-outer-uses.ll b/llvm/test/Transforms/DFAJumpThreading/max-outer-uses.ll new file mode 100644 index 0000000..dfcc5b1 --- /dev/null +++ b/llvm/test/Transforms/DFAJumpThreading/max-outer-uses.ll @@ -0,0 +1,326 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S -passes=dfa-jump-threading -dfa-max-out-use-blocks=5 %s | FileCheck %s + +declare void @use(i32) + +define void @max_outer_uses_by_switch(i32 %cond, ptr %p) { +; CHECK-LABEL: define void @max_outer_uses_by_switch( +; CHECK-SAME: i32 [[COND:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[SWITCH_BB:.*]] +; CHECK: [[SWITCH_BB]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[DETERMINE:%.*]], %[[SUB_SWITCH_BB:.*]] ], [ 2, %[[CASE2:.*]] ] +; CHECK-NEXT: switch i32 [[PHI]], label %[[DEFAULT_DEST:.*]] [ +; CHECK-NEXT: i32 0, label %[[CASE1:.*]] +; CHECK-NEXT: i32 1, label %[[CASE2]] +; CHECK-NEXT: i32 2, label %[[CASE3:.*]] +; CHECK-NEXT: ] +; CHECK: [[CASE1]]: +; CHECK-NEXT: br label %[[SUB_SWITCH_BB]] +; CHECK: [[CASE3]]: +; CHECK-NEXT: br label %[[SUB_SWITCH_BB]] +; CHECK: [[SUB_SWITCH_BB]]: +; CHECK-NEXT: [[DETERMINE]] = phi i32 [ 1, %[[CASE1]] ], [ 3, %[[CASE3]] ] +; CHECK-NEXT: [[DEF:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[SWITCH_BB]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1:.*]] +; CHECK-NEXT: i32 1, label %[[OUTER2:.*]] +; CHECK-NEXT: i32 2, label %[[OUTER3:.*]] +; CHECK-NEXT: i32 3, label %[[OUTER4:.*]] +; CHECK-NEXT: ] +; CHECK: [[CASE2]]: +; CHECK-NEXT: br label %[[SWITCH_BB]] +; CHECK: [[OUTER1]]: +; CHECK-NEXT: call void @use(i32 [[DEF]]) +; CHECK-NEXT: ret void +; CHECK: [[OUTER2]]: +; CHECK-NEXT: call void @use(i32 [[DEF]]) +; CHECK-NEXT: ret void +; CHECK: [[OUTER3]]: +; CHECK-NEXT: call void @use(i32 [[DEF]]) +; CHECK-NEXT: ret void +; CHECK: [[OUTER4]]: +; CHECK-NEXT: call void @use(i32 [[DEF]]) +; CHECK-NEXT: ret void +; CHECK: [[DEFAULT_DEST]]: +; CHECK-NEXT: ret void +; +entry: + br label %switch_bb + +switch_bb: + %phi = phi i32 [ 0, %entry ], [ %determine, %sub_switch_bb ], [ 2, %case2 ] + switch i32 %phi, label %default_dest [ + i32 0, label %case1 + i32 1, label %case2 + i32 2, label %case3 + ] + +case1: + br label %sub_switch_bb + +case3: + br label %sub_switch_bb + +sub_switch_bb: + %determine = phi i32 [ 1, %case1 ], [ 3, %case3 ] + %def = load i32, ptr %p + switch i32 %cond, label %switch_bb [ + i32 0, label %outer1 + i32 1, label %outer2 + i32 2, label %outer3 + i32 3, label %outer4 + ] + +case2: + br label %switch_bb + +outer1: + call void @use(i32 %def) + ret void + +outer2: + call void @use(i32 %def) + ret void + +outer3: + call void @use(i32 %def) + ret void + +outer4: + call void @use(i32 %def) + ret void + +default_dest: + ret void +} + +define void @less_outer_uses_by_switch(i32 %cond, ptr %p) { +; CHECK-LABEL: define void @less_outer_uses_by_switch( +; CHECK-SAME: i32 [[COND:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[SWITCH_BB:.*]] +; CHECK: [[SWITCH_BB]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ poison, %[[SUB_SWITCH_BB:.*]] ] +; CHECK-NEXT: switch i32 [[PHI]], label %[[DEFAULT_DEST:.*]] [ +; CHECK-NEXT: i32 0, label %[[CASE1:.*]] +; CHECK-NEXT: i32 1, label %[[CASE2:.*]] +; CHECK-NEXT: i32 2, label %[[CASE3:.*]] +; CHECK-NEXT: 
] +; CHECK: [[SWITCH_BB_JT2:.*]]: +; CHECK-NEXT: [[PHI_JT2:%.*]] = phi i32 [ 2, %[[CASE2]] ] +; CHECK-NEXT: br label %[[CASE3]] +; CHECK: [[SWITCH_BB_JT3:.*]]: +; CHECK-NEXT: [[PHI_JT3:%.*]] = phi i32 [ [[DETERMINE_JT3:%.*]], %[[SUB_SWITCH_BB_JT3:.*]] ] +; CHECK-NEXT: br label %[[DEFAULT_DEST]] +; CHECK: [[SWITCH_BB_JT1:.*]]: +; CHECK-NEXT: [[PHI_JT1:%.*]] = phi i32 [ [[DETERMINE_JT1:%.*]], %[[SUB_SWITCH_BB_JT1:.*]] ] +; CHECK-NEXT: br label %[[CASE2]] +; CHECK: [[CASE1]]: +; CHECK-NEXT: br label %[[SUB_SWITCH_BB_JT1]] +; CHECK: [[CASE3]]: +; CHECK-NEXT: br label %[[SUB_SWITCH_BB_JT3]] +; CHECK: [[SUB_SWITCH_BB]]: +; CHECK-NEXT: [[DEF:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[SWITCH_BB]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1:.*]] +; CHECK-NEXT: ] +; CHECK: [[SUB_SWITCH_BB_JT3]]: +; CHECK-NEXT: [[DETERMINE_JT3]] = phi i32 [ 3, %[[CASE3]] ] +; CHECK-NEXT: [[DEF_JT3:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[SWITCH_BB_JT3]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1]] +; CHECK-NEXT: ] +; CHECK: [[SUB_SWITCH_BB_JT1]]: +; CHECK-NEXT: [[DETERMINE_JT1]] = phi i32 [ 1, %[[CASE1]] ] +; CHECK-NEXT: [[DEF_JT1:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[SWITCH_BB_JT1]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1]] +; CHECK-NEXT: ] +; CHECK: [[CASE2]]: +; CHECK-NEXT: br label %[[SWITCH_BB_JT2]] +; CHECK: [[OUTER1]]: +; CHECK-NEXT: [[DEF1:%.*]] = phi i32 [ [[DEF_JT3]], %[[SUB_SWITCH_BB_JT3]] ], [ [[DEF_JT1]], %[[SUB_SWITCH_BB_JT1]] ], [ [[DEF]], %[[SUB_SWITCH_BB]] ] +; CHECK-NEXT: call void @use(i32 [[DEF1]]) +; CHECK-NEXT: ret void +; CHECK: [[DEFAULT_DEST]]: +; CHECK-NEXT: ret void +; +entry: + br label %switch_bb + +switch_bb: + %phi = phi i32 [ 0, %entry ], [ %determine, %sub_switch_bb ], [ 2, %case2 ] + switch i32 %phi, label %default_dest [ + i32 0, label %case1 + i32 1, label %case2 + i32 2, label %case3 + ] + +case1: + br label %sub_switch_bb + +case3: + br label %sub_switch_bb + +sub_switch_bb: + %determine = phi i32 [ 1, %case1 ], [ 3, %case3 ] + %def = load i32, ptr %p + switch i32 %cond, label %switch_bb [ + i32 0, label %outer1 + ] + +case2: + br label %switch_bb + +outer1: + call void @use(i32 %def) + ret void + +default_dest: + ret void +} + + +define void @max_outer_uses_multi_preds(i32 %cond, ptr %p) { +; CHECK-LABEL: define void @max_outer_uses_multi_preds( +; CHECK-SAME: i32 [[COND:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[SWITCH_BB:.*]] +; CHECK: [[SWITCH_BB]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ poison, %[[SUB_SWITCH_BB:.*]] ] +; CHECK-NEXT: switch i32 [[PHI]], label %[[DEFAULT_DEST:.*]] [ +; CHECK-NEXT: i32 0, label %[[CASE1:.*]] +; CHECK-NEXT: i32 1, label %[[CASE2:.*]] +; CHECK-NEXT: i32 2, label %[[CASE3:.*]] +; CHECK-NEXT: i32 3, label %[[CASE4:.*]] +; CHECK-NEXT: ] +; CHECK: [[SWITCH_BB_JT2:.*]]: +; CHECK-NEXT: [[PHI_JT2:%.*]] = phi i32 [ 2, %[[CASE2]] ] +; CHECK-NEXT: br label %[[CASE3]] +; CHECK: [[SWITCH_BB_JT3:.*]]: +; CHECK-NEXT: [[PHI_JT3:%.*]] = phi i32 [ [[DETERMINE_JT3:%.*]], %[[SUB_SWITCH_BB_JT3:.*]] ] +; CHECK-NEXT: br label %[[CASE4]] +; CHECK: [[SWITCH_BB_JT1:.*]]: +; CHECK-NEXT: [[PHI_JT1:%.*]] = phi i32 [ [[DETERMINE_JT1:%.*]], %[[SUB_SWITCH_BB_JT1:.*]] ] +; CHECK-NEXT: br label %[[CASE2]] +; CHECK: [[CASE1]]: +; CHECK-NEXT: br label %[[SUB_SWITCH_BB_JT1]] +; CHECK: [[CASE3]]: +; CHECK-NEXT: br label %[[SUB_SWITCH_BB_JT3]] +; CHECK: [[SUB_SWITCH_BB]]: +; CHECK-NEXT: [[DEF:%.*]] = load i32, ptr 
[[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[SWITCH_BB]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1:.*]] +; CHECK-NEXT: i32 1, label %[[OUTER2:.*]] +; CHECK-NEXT: i32 2, label %[[OUTER3:.*]] +; CHECK-NEXT: i32 3, label %[[OUTER4:.*]] +; CHECK-NEXT: ] +; CHECK: [[SUB_SWITCH_BB_JT3]]: +; CHECK-NEXT: [[DETERMINE_JT3]] = phi i32 [ 3, %[[CASE3]] ] +; CHECK-NEXT: [[DEF_JT3:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[SWITCH_BB_JT3]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1]] +; CHECK-NEXT: i32 1, label %[[OUTER2]] +; CHECK-NEXT: i32 2, label %[[OUTER3]] +; CHECK-NEXT: i32 3, label %[[OUTER4]] +; CHECK-NEXT: ] +; CHECK: [[SUB_SWITCH_BB_JT1]]: +; CHECK-NEXT: [[DETERMINE_JT1]] = phi i32 [ 1, %[[CASE1]] ] +; CHECK-NEXT: [[DEF_JT1:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[SWITCH_BB_JT1]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1]] +; CHECK-NEXT: i32 1, label %[[OUTER2]] +; CHECK-NEXT: i32 2, label %[[OUTER3]] +; CHECK-NEXT: i32 3, label %[[OUTER4]] +; CHECK-NEXT: ] +; CHECK: [[CASE4]]: +; CHECK-NEXT: [[DEF1:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: switch i32 [[COND]], label %[[OUTER4]] [ +; CHECK-NEXT: i32 0, label %[[OUTER1]] +; CHECK-NEXT: i32 1, label %[[OUTER2]] +; CHECK-NEXT: i32 2, label %[[OUTER3]] +; CHECK-NEXT: ] +; CHECK: [[CASE2]]: +; CHECK-NEXT: br label %[[SWITCH_BB_JT2]] +; CHECK: [[OUTER1]]: +; CHECK-NEXT: [[PHI1:%.*]] = phi i32 [ [[DEF]], %[[SUB_SWITCH_BB]] ], [ [[DEF1]], %[[CASE4]] ], [ [[DEF_JT1]], %[[SUB_SWITCH_BB_JT1]] ], [ [[DEF_JT3]], %[[SUB_SWITCH_BB_JT3]] ] +; CHECK-NEXT: call void @use(i32 [[PHI1]]) +; CHECK-NEXT: ret void +; CHECK: [[OUTER2]]: +; CHECK-NEXT: [[PHI2:%.*]] = phi i32 [ [[DEF]], %[[SUB_SWITCH_BB]] ], [ [[DEF1]], %[[CASE4]] ], [ [[DEF_JT1]], %[[SUB_SWITCH_BB_JT1]] ], [ [[DEF_JT3]], %[[SUB_SWITCH_BB_JT3]] ] +; CHECK-NEXT: call void @use(i32 [[PHI2]]) +; CHECK-NEXT: ret void +; CHECK: [[OUTER3]]: +; CHECK-NEXT: [[PHI3:%.*]] = phi i32 [ [[DEF]], %[[SUB_SWITCH_BB]] ], [ [[DEF1]], %[[CASE4]] ], [ [[DEF_JT1]], %[[SUB_SWITCH_BB_JT1]] ], [ [[DEF_JT3]], %[[SUB_SWITCH_BB_JT3]] ] +; CHECK-NEXT: call void @use(i32 [[PHI3]]) +; CHECK-NEXT: ret void +; CHECK: [[OUTER4]]: +; CHECK-NEXT: [[PHI4:%.*]] = phi i32 [ [[DEF]], %[[SUB_SWITCH_BB]] ], [ [[DEF1]], %[[CASE4]] ], [ [[DEF_JT1]], %[[SUB_SWITCH_BB_JT1]] ], [ [[DEF_JT3]], %[[SUB_SWITCH_BB_JT3]] ] +; CHECK-NEXT: call void @use(i32 [[PHI4]]) +; CHECK-NEXT: ret void +; CHECK: [[DEFAULT_DEST]]: +; CHECK-NEXT: ret void +; +entry: + br label %switch_bb + +switch_bb: + %phi = phi i32 [ 0, %entry ], [ %determine, %sub_switch_bb ], [ 2, %case2 ] + switch i32 %phi, label %default_dest [ + i32 0, label %case1 + i32 1, label %case2 + i32 2, label %case3 + i32 3, label %case4 + ] + +case1: + br label %sub_switch_bb + +case3: + br label %sub_switch_bb + +sub_switch_bb: + %determine = phi i32 [ 1, %case1 ], [ 3, %case3 ] + %def = load i32, ptr %p + switch i32 %cond, label %switch_bb [ + i32 0, label %outer1 + i32 1, label %outer2 + i32 2, label %outer3 + i32 3, label %outer4 + ] + +case4: + %def1 = load i32, ptr %p + switch i32 %cond, label %outer4 [ + i32 0, label %outer1 + i32 1, label %outer2 + i32 2, label %outer3 + ] + +case2: + br label %switch_bb + +outer1: + %phi1 = phi i32 [ %def, %sub_switch_bb ], [ %def1, %case4 ] + call void @use(i32 %phi1) + ret void + +outer2: + %phi2 = phi i32 [ %def, %sub_switch_bb ], [ %def1, %case4 ] + call void @use(i32 %phi2) + ret void + +outer3: + %phi3 = phi i32 [ %def, %sub_switch_bb ], [ %def1, %case4 ] + 
call void @use(i32 %phi3) + ret void + +outer4: + %phi4 = phi i32 [ %def, %sub_switch_bb ], [ %def1, %case4 ] + call void @use(i32 %phi4) + ret void + +default_dest: + ret void +} diff --git a/llvm/test/Transforms/FixIrreducible/bug45623.ll b/llvm/test/Transforms/FixIrreducible/bug45623.ll index 5872443..b6dd6fb 100644 --- a/llvm/test/Transforms/FixIrreducible/bug45623.ll +++ b/llvm/test/Transforms/FixIrreducible/bug45623.ll @@ -90,3 +90,112 @@ for.end626: ; preds = %for.cond616 if.else629: ; preds = %backtrack br label %retry } + +define void @tre_tnfa_run_backtrack_callbr(i1 %arg) { +; CHECK-LABEL: @tre_tnfa_run_backtrack_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[RETRY:%.*]] [] +; CHECK: retry: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG:%.*]]) +; CHECK-NEXT: to label [[RETRY_TARGET_BACKTRACK:%.*]] [label %retry.target.while.body248] +; CHECK: while.body248: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]]) +; CHECK-NEXT: to label [[IF_THEN250:%.*]] [label %if.end275] +; CHECK: if.then250: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[FOR_COND264:%.*]] [] +; CHECK: for.cond264: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]]) +; CHECK-NEXT: to label [[FOR_BODY267:%.*]] [label %backtrack] +; CHECK: for.body267: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[FOR_COND264]] [] +; CHECK: if.end275: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[FOR_COND342:%.*]] [] +; CHECK: for.cond342: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]]) +; CHECK-NEXT: to label [[FOR_BODY345:%.*]] [label %for.end580] +; CHECK: for.body345: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[FOR_COND342]] [] +; CHECK: for.end580: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[BACKTRACK:%.*]] [] +; CHECK: backtrack: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]]) +; CHECK-NEXT: to label [[IF_THEN595:%.*]] [label %if.else629] +; CHECK: if.then595: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[FOR_COND616:%.*]] [] +; CHECK: for.cond616: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]]) +; CHECK-NEXT: to label [[FOR_BODY619:%.*]] [label %for.end626] +; CHECK: for.body619: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[FOR_COND616]] [] +; CHECK: for.end626: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[FOR_END626_TARGET_WHILE_BODY248:%.*]] [] +; CHECK: if.else629: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[RETRY]] [] +; CHECK: for.end626.target.while.body248: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: retry.target.backtrack: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: retry.target.while.body248: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_WHILE_BODY248:%.*]] = phi i1 [ true, [[FOR_END626_TARGET_WHILE_BODY248]] ], [ false, [[RETRY_TARGET_BACKTRACK]] ], [ true, [[RETRY_TARGET_WHILE_BODY248:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_WHILE_BODY248]], label [[WHILE_BODY248:%.*]], label [[BACKTRACK]] +; +entry: + callbr void asm "", ""() to label %retry [] + +retry: + callbr void asm "", "r,!i"(i1 %arg) to label %backtrack [label %while.body248] + +while.body248: ; preds = %for.end626, %retry + callbr void asm "", "r,!i"(i1 %arg) to label %if.then250 [label %if.end275] + +if.then250: ; preds = %while.body248 + callbr void asm "", ""() to label %for.cond264 [] + +for.cond264: ; preds = %for.body267, %if.then250 + callbr void 
asm "", "r,!i"(i1 %arg) to label %for.body267 [label %backtrack] + +for.body267: ; preds = %for.cond264 + callbr void asm "", ""() to label %for.cond264 [] + +if.end275: ; preds = %while.body248 + callbr void asm "", ""() to label %for.cond342 [] + +for.cond342: ; preds = %for.body345, %if.end275 + callbr void asm "", "r,!i"(i1 %arg) to label %for.body345 [label %for.end580] + +for.body345: ; preds = %for.cond342 + callbr void asm "", ""() to label %for.cond342 [] + +for.end580: ; preds = %for.cond342 + callbr void asm "", ""() to label %backtrack [] + +backtrack: ; preds = %for.end580, %for.cond264, %retry + callbr void asm "", "r,!i"(i1 %arg) to label %if.then595 [label %if.else629] + +if.then595: ; preds = %backtrack + callbr void asm "", ""() to label %for.cond616 [] + +for.cond616: ; preds = %for.body619, %if.then595 + callbr void asm "", "r,!i"(i1 %arg) to label %for.body619 [label %for.end626] + +for.body619: ; preds = %for.cond616 + callbr void asm "", ""() to label %for.cond616 [] + +for.end626: ; preds = %for.cond616 + callbr void asm "", ""() to label %while.body248 [] + +if.else629: ; preds = %backtrack + callbr void asm "", ""() to label %retry [] +} diff --git a/llvm/test/Transforms/FixIrreducible/callbr.ll b/llvm/test/Transforms/FixIrreducible/callbr.ll new file mode 100644 index 0000000..26ca6c7 --- /dev/null +++ b/llvm/test/Transforms/FixIrreducible/callbr.ll @@ -0,0 +1,869 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes='fix-irreducible,verify<loops>' -S | FileCheck %s +; RUN: opt < %s -passes='verify<loops>,fix-irreducible,verify<loops>' -S | FileCheck %s +; RUN: opt < %s -passes='print<cycles>' -disable-output 2>&1 | FileCheck %s --check-prefix CYCLES-BEFORE +; RUN: opt < %s -passes='fix-irreducible,print<cycles>' -disable-output 2>&1 | FileCheck %s --check-prefix CYCLES-AFTER + +; CYCLES-BEFORE: CycleInfo for function: callbr_entry +; CYCLES-BEFORE-NEXT: depth=1: entries(indirect fallthrough) +; CYCLES-AFTER: CycleInfo for function: callbr_entry +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) indirect fallthrough + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_entry_targets_with_phi_nodes +; CYCLES-BEFORE-NEXT: depth=1: entries(block1 block) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_entry_targets_with_phi_nodes +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) block1 block + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets +; CYCLES-BEFORE-NEXT: depth=1: entries(indirect fallthrough) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) indirect fallthrough + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets1 +; CYCLES-BEFORE-NEXT: depth=1: entries(indirect1 indirect fallthrough) +; CYCLES-BEFORE-NEXT: depth=2: entries(indirect fallthrough) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets1 +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) indirect1 indirect fallthrough irr.guard1 irr.guard2 +; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) indirect fallthrough + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_header_no_indirect +; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough callbr) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header_no_indirect +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough callbr callbr.target.fallthrough + +; CYCLES-BEFORE-NEXT: 
CycleInfo for function: callbr_header +; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough callbr) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough callbr callbr.target.fallthrough + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_header_multiple_indirect_targets +; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough callbr) indirect1 +; CYCLES-BEFORE-NEXT: depth=2: entries(callbr) indirect1 +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header_multiple_indirect_targets +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough callbr indirect1 callbr.target.fallthrough +; CYCLES-AFTER-NEXT: depth=2: entries(callbr) indirect1 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_regular +; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough2 fallthrough1) +; CYCLES-BEFORE-NEXT: depth=1: entries(indirect2 indirect1) +; CYCLES-BEFORE-NEXT: depth=1: entries(nocallbr2 nocallbr1) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_regular +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough2 fallthrough1 +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard1) indirect2 indirect1 +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard2) nocallbr2 nocallbr1 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_regular1 +; CYCLES-BEFORE-NEXT: depth=1: entries(callbr nocallbr) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_regular1 +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) callbr nocallbr + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_regular2 +; CYCLES-BEFORE-NEXT: depth=1: entries(callbr nocallbr) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_regular2 +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) callbr nocallbr + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_header_and_regular +; CYCLES-BEFORE-NEXT: depth=1: entries(callbr_header) callbr_regular mid +; CYCLES-BEFORE-NEXT: depth=2: entries(callbr_regular mid) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header_and_regular +; CYCLES-AFTER-NEXT: depth=1: entries(callbr_header) callbr_regular mid callbr_header.target.mid callbr_header.target.callbr_regular irr.guard +; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard) callbr_regular mid + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_only +; CYCLES-BEFORE-NEXT: depth=1: entries(callbr_block callbr_header) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_only +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) callbr_block callbr_header callbr_header.target.callbr_block + +; CYCLES-BEFORE-NEXT: CycleInfo for function: entry_multiple_callbr +; CYCLES-BEFORE-NEXT: depth=1: entries(cb2 block block1) +; CYCLES-BEFORE-NEXT: depth=2: entries(block block1) +; CYCLES-AFTER-NEXT: CycleInfo for function: entry_multiple_callbr +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) cb2 block block1 irr.guard1 cb2.target.block1 cb2.target.block irr.guard2 +; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) block block1 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_exit_with_separate_entries +; CYCLES-BEFORE-NEXT: depth=1: entries(l2 l1) cb +; CYCLES-BEFORE-NEXT: depth=2: entries(l1 cb) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_exit_with_separate_entries +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) l2 l1 cb cb.target.l1 irr.guard1 +; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard1) l1 cb cb.target.l1 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_exit_with_separate_entries1 +; CYCLES-BEFORE-NEXT: depth=1: entries(loop2 loop1) cb +; 
CYCLES-AFTER-NEXT: CycleInfo for function: callbr_exit_with_separate_entries1 +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) loop2 loop1 cb cb.target.loop2 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_only_multiple +; CYCLES-BEFORE-NEXT: depth=1: entries(cb3 cb1 cb2) +; CYCLES-BEFORE-NEXT: depth=2: entries(cb1 cb2) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_only_multiple +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) cb3 cb1 cb2 cb2.target.cb3 cb1.target.cb3 irr.guard1 cb2.target.cb1 cb3.target.cb1 irr.guard2 +; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) cb1 cb2 cb2.target.cb1 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_bypass +; CYCLES-BEFORE-NEXT: depth=1: entries(l1 cb) l2 +; CYCLES-BEFORE-NEXT: depth=2: entries(cb l2) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_bypass +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) l1 cb l2 cb.target.l1 irr.guard1 +; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard1) cb l2 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_multiple_with_exit +; CYCLES-BEFORE-NEXT: depth=1: entries(l3 l1 l2) +; CYCLES-BEFORE-NEXT: depth=2: entries(l1 l2) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_multiple_with_exit +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) l3 l1 l2 irr.guard1 irr.guard2 +; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) l1 l2 + +; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_nested +; CYCLES-BEFORE-NEXT: depth=1: entries(bb bh) +; CYCLES-BEFORE-NEXT: depth=1: entries(b h) +; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_nested +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) bb bh +; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard1) b h + +; Fix the irreducible loop in which callbr is the entry (see description at the +; top of FixIrreducible.cpp). 
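+;
+; A rough C-level shape that could produce this CFG (hypothetical sketch; in
+; LLVM IR an asm goto becomes a callbr, and here both of its successors enter
+; the fallthrough <-> indirect cycle, which is what makes the cycle
+; irreducible):
+;
+;   asm goto("" : : : : indirect);
+; fallthrough:
+;   if (!c) return;
+; indirect:
+;   goto fallthrough;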
+define void @callbr_entry(i1 %c) { +; CHECK-LABEL: define void @callbr_entry( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[CALLBR:.*:]] +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %callbr.target.indirect] +; CHECK: [[FALLTHROUGH:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD:.*]], label %[[RET:.*]] +; CHECK: [[INDIRECT:.*]]: +; CHECK-NEXT: br label %[[FALLTHROUGH]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR_TARGET_INDIRECT:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_INDIRECT:%.*]] = phi i1 [ true, %[[FALLTHROUGH]] ], [ false, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ true, %[[CALLBR_TARGET_INDIRECT]] ] +; CHECK-NEXT: br i1 [[GUARD_INDIRECT]], label %[[INDIRECT]], label %[[FALLTHROUGH]] +; +callbr: + callbr void asm "", "!i"() to label %fallthrough [label %indirect] +fallthrough: + br i1 %c, label %indirect, label %ret +indirect: + br label %fallthrough +ret: + ret void +} + +define i32 @callbr_entry_targets_with_phi_nodes(i1 %c) { +; CHECK-LABEL: define i32 @callbr_entry_targets_with_phi_nodes( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[ENTRY_TARGET_BLOCK:.*]] [label %entry.target.block1] +; CHECK: [[BLOCK:.*]]: +; CHECK-NEXT: [[A:%.*]] = phi i32 [ 1, %[[BLOCK1:.*]] ], [ [[A_MOVED:%.*]], %[[IRR_GUARD:.*]] ] +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[BLOCK1]]: +; CHECK-NEXT: br i1 [[C]], label %[[BLOCK]], label %[[RET:.*]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret i32 [[B_MOVED:%.*]] +; CHECK: [[ENTRY_TARGET_BLOCK]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[ENTRY_TARGET_BLOCK1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[A_MOVED]] = phi i32 [ poison, %[[BLOCK]] ], [ 42, %[[ENTRY_TARGET_BLOCK]] ], [ poison, %[[ENTRY_TARGET_BLOCK1]] ] +; CHECK-NEXT: [[B_MOVED]] = phi i32 [ [[A]], %[[BLOCK]] ], [ poison, %[[ENTRY_TARGET_BLOCK]] ], [ 43, %[[ENTRY_TARGET_BLOCK1]] ] +; CHECK-NEXT: [[GUARD_BLOCK1:%.*]] = phi i1 [ true, %[[BLOCK]] ], [ false, %[[ENTRY_TARGET_BLOCK]] ], [ true, %[[ENTRY_TARGET_BLOCK1]] ] +; CHECK-NEXT: br i1 [[GUARD_BLOCK1]], label %[[BLOCK1]], label %[[BLOCK]] +; +entry: + callbr void asm "", "!i"() to label %block [label %block1] +block: + %a = phi i32 [42, %entry], [1, %block1] + br label %block1 +block1: + %b = phi i32 [43, %entry], [%a, %block] + br i1 %c, label %block, label %ret +ret: + ret i32 %b +} + +define void @callbr_entry_multiple_indirect_targets(i1 %c) { +; CHECK-LABEL: define void @callbr_entry_multiple_indirect_targets( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[CALLBR:.*:]] +; CHECK-NEXT: callbr void asm "", "!i,!i,!i"() +; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %[[CALLBR_TARGET_INDIRECT:.*]], label %[[INDIRECT1:.*]], label %indirect2] +; CHECK: [[INDIRECT3:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD:.*]], label %[[RET:.*]] +; CHECK: [[INDIRECT:.*]]: +; CHECK-NEXT: br label %[[INDIRECT3]] +; CHECK: [[INDIRECT1]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[INDIRECT2:.*:]] +; CHECK-NEXT: br label %[[RET]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR_TARGET_INDIRECT]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_INDIRECT:%.*]] = phi i1 
[ true, %[[INDIRECT3]] ], [ true, %[[INDIRECT1]] ], [ false, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ true, %[[CALLBR_TARGET_INDIRECT]] ] +; CHECK-NEXT: br i1 [[GUARD_INDIRECT]], label %[[INDIRECT]], label %[[INDIRECT3]] +; +callbr: + callbr void asm "", "!i,!i,!i"() to label %fallthrough [label %indirect, label %indirect1, label %indirect2] +fallthrough: + br i1 %c, label %indirect, label %ret +indirect: + br label %fallthrough +indirect1: + br label %indirect +indirect2: + br label %ret +ret: + ret void +} + +define void @callbr_entry_multiple_indirect_targets1(i1 %c, i1 %d) { +; CHECK-LABEL: define void @callbr_entry_multiple_indirect_targets1( +; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) { +; CHECK-NEXT: [[CALLBR:.*:]] +; CHECK-NEXT: callbr void asm "", "!i,!i,!i"() +; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %[[CALLBR_TARGET_INDIRECT:.*]], label %[[CALLBR_TARGET_INDIRECT1:.*]], label %indirect2] +; CHECK: [[INDIRECT3:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD2:.*]], label %[[RET:.*]] +; CHECK: [[INDIRECT:.*]]: +; CHECK-NEXT: br i1 [[D]], label %[[INDIRECT3]], label %[[IRR_GUARD:.*]] +; CHECK: [[INDIRECT1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[INDIRECT2:.*:]] +; CHECK-NEXT: br label %[[RET]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR_TARGET_INDIRECT]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR_TARGET_INDIRECT1]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_INDIRECT1:%.*]] = phi i1 [ true, %[[INDIRECT]] ], [ false, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ false, %[[CALLBR_TARGET_INDIRECT]] ], [ true, %[[CALLBR_TARGET_INDIRECT1]] ] +; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ false, %[[INDIRECT]] ], [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ false, %[[CALLBR_TARGET_INDIRECT]] ], [ false, %[[CALLBR_TARGET_INDIRECT1]] ] +; CHECK-NEXT: [[GUARD_FALLTHROUGH_INV:%.*]] = xor i1 [[GUARD_FALLTHROUGH]], true +; CHECK-NEXT: br i1 [[GUARD_INDIRECT1]], label %[[INDIRECT1]], label %[[IRR_GUARD1:.*]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[IRR_GUARD2]]: +; CHECK-NEXT: [[GUARD_INDIRECT:%.*]] = phi i1 [ true, %[[INDIRECT3]] ], [ [[GUARD_FALLTHROUGH_INV]], %[[IRR_GUARD1]] ], [ true, %[[INDIRECT1]] ] +; CHECK-NEXT: br i1 [[GUARD_INDIRECT]], label %[[INDIRECT]], label %[[INDIRECT3]] +; +callbr: + callbr void asm "", "!i,!i,!i"() to label %fallthrough [label %indirect, label %indirect1, label %indirect2] +fallthrough: + br i1 %c, label %indirect, label %ret +indirect: + br i1 %d, label %fallthrough, label %indirect1 +indirect1: + br label %indirect +indirect2: + br label %ret +ret: + ret void +} + +; Fix the irreducible loop in which callbr is the header (see the example at the +; top of FixIrreducible.cpp). 
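+; In this shape the new irr.guard block becomes the single (reducible) cycle
+; header; note how the entry condition is precomputed as an inverted copy of
+; %d (the "xor i1 %d, true" in the CHECK lines) and fed into the guard phi,
+; so the first trip through the guard dispatches to whichever block the
+; original branch on %d would have reached.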
+define void @callbr_header_no_indirect(i1 %c, i1 %d) { +; CHECK-LABEL: define void @callbr_header_no_indirect( +; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) { +; CHECK-NEXT: [[D_INV:%.*]] = xor i1 [[D]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[CALLBR:.*]]: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [] +; CHECK: [[FALLTHROUGH:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[CALLBR]], label %[[RET:.*]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ [[D_INV]], [[TMP0:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH]], label %[[FALLTHROUGH]], label %[[CALLBR]] +; + br i1 %d, label %callbr, label %fallthrough +callbr: + callbr void asm "", ""() to label %fallthrough [] +fallthrough: + br i1 %c, label %callbr, label %ret +ret: + ret void +} + +; Fix the irreducible loop in which callbr is the header. +define void @callbr_header(i1 %c, i1 %d) { +; CHECK-LABEL: define void @callbr_header( +; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) { +; CHECK-NEXT: [[D_INV:%.*]] = xor i1 [[D]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[CALLBR:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %indirect] +; CHECK: [[INDIRECT:.*:]] +; CHECK-NEXT: br label %[[RET:.*]] +; CHECK: [[FALLTHROUGH:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[CALLBR]], label %[[RET]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ [[D_INV]], [[TMP0:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH]], label %[[FALLTHROUGH]], label %[[CALLBR]] +; + br i1 %d, label %callbr, label %fallthrough +callbr: + callbr void asm "", "!i"() to label %fallthrough [label %indirect] +indirect: + br label %ret +fallthrough: + br i1 %c, label %callbr, label %ret +ret: + ret void +} + +define void @callbr_header_multiple_indirect_targets(i1 %c, i1 %d) { +; CHECK-LABEL: define void @callbr_header_multiple_indirect_targets( +; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) { +; CHECK-NEXT: [[D_INV:%.*]] = xor i1 [[D]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[CALLBR:.*]]: +; CHECK-NEXT: callbr void asm "", "!i,!i"() +; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %[[INDIRECT1:.*]], label %indirect1] +; CHECK: [[INDIRECT1]]: +; CHECK-NEXT: br label %[[RET:.*]] +; CHECK: [[INDIRECT2:.*:]] +; CHECK-NEXT: br label %[[CALLBR]] +; CHECK: [[FALLTHROUGH:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[CALLBR]], label %[[RET]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ [[D_INV]], [[TMP0:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH]], label %[[FALLTHROUGH]], label %[[CALLBR]] +; + br i1 %d, label %callbr, label %fallthrough +callbr: + callbr void asm "", "!i,!i"() to label %fallthrough [label %indirect, label %indirect1] +indirect: + br label %ret +indirect1: + br label %callbr +fallthrough: + br i1 %c, label %callbr, label %ret +ret: + ret void +} + +; Fix the three usual irreducible loops (callbr isn't a part of one of them): +; 
- fallthrough, fallthrough1, fallthrough2 +; - indirect, indirect1, indirect2 +; - nocallbr, nocallbr1, nocallbr2 +define void @callbr_regular(i1 %c, i1 %d) { +; CHECK-LABEL: define void @callbr_regular( +; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) { +; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true +; CHECK-NEXT: br i1 [[D]], label %[[CALLBR:.*]], label %[[NOCALLBR:.*]] +; CHECK: [[CALLBR]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[FALLTHROUGH:.*]] [label %indirect] +; CHECK: [[FALLTHROUGH]]: +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[FALLTHROUGH1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[FALLTHROUGH2:.*]]: +; CHECK-NEXT: br i1 [[D]], label %[[FALLTHROUGH1]], label %[[RET:.*]] +; CHECK: [[INDIRECT:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD1:.*]] +; CHECK: [[INDIRECT1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD1]] +; CHECK: [[INDIRECT2:.*]]: +; CHECK-NEXT: br i1 [[D]], label %[[INDIRECT1]], label %[[RET]] +; CHECK: [[NOCALLBR]]: +; CHECK-NEXT: br label %[[IRR_GUARD2:.*]] +; CHECK: [[NOCALLBR1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[NOCALLBR2:.*]]: +; CHECK-NEXT: br i1 [[D]], label %[[NOCALLBR1]], label %[[RET]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_FALLTHROUGH2:%.*]] = phi i1 [ true, %[[FALLTHROUGH1]] ], [ [[C_INV]], %[[FALLTHROUGH]] ] +; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH2]], label %[[FALLTHROUGH2]], label %[[FALLTHROUGH1]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: [[GUARD_INDIRECT2:%.*]] = phi i1 [ true, %[[INDIRECT1]] ], [ [[C_INV]], %[[INDIRECT]] ] +; CHECK-NEXT: br i1 [[GUARD_INDIRECT2]], label %[[INDIRECT2]], label %[[INDIRECT1]] +; CHECK: [[IRR_GUARD2]]: +; CHECK-NEXT: [[GUARD_NOCALLBR2:%.*]] = phi i1 [ true, %[[NOCALLBR1]] ], [ [[C_INV]], %[[NOCALLBR]] ] +; CHECK-NEXT: br i1 [[GUARD_NOCALLBR2]], label %[[NOCALLBR2]], label %[[NOCALLBR1]] +; + br i1 %d, label %callbr, label %nocallbr +callbr: + callbr void asm "", "!i"() to label %fallthrough [label %indirect] +fallthrough: + br i1 %c, label %fallthrough1, label %fallthrough2 +fallthrough1: + br label %fallthrough2 +fallthrough2: + br i1 %d, label %fallthrough1, label %ret +indirect: + br i1 %c, label %indirect1, label %indirect2 +indirect1: + br label %indirect2 +indirect2: + br i1 %d, label %indirect1, label %ret +nocallbr: + br i1 %c, label %nocallbr1, label %nocallbr2 +nocallbr1: + br label %nocallbr2 +nocallbr2: + br i1 %d, label %nocallbr1, label %ret +ret: + ret void +} + +; Fix an irreducible loop in which callbr is a regular block (neither entry nor +; header). See the example at the top of FixIrreducible.cpp. +define void @callbr_regular1(i1 %c) { +; CHECK-LABEL: define void @callbr_regular1( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[NOCALLBR:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[RET:.*]] [label %nocallbr] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_CALLBR:%.*]] = phi i1 [ true, %[[NOCALLBR]] ], [ [[C_INV]], [[TMP0:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_CALLBR]], label %[[CALLBR]], label %[[NOCALLBR]] +; + br i1 %c, label %nocallbr, label %callbr +nocallbr: + br label %callbr +callbr: + callbr void asm "", "!i"() to label %ret [label %nocallbr] +ret: + ret void +} + +; Fix an irreducible loop in which callbr is a regular block (neither entry nor +; header). 
See the example at the top of FixIrreducible.cpp. +define void @callbr_regular2(i1 %c) { +; CHECK-LABEL: define void @callbr_regular2( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[NOCALLBR:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[NOCALLBR]] [label %ret] +; CHECK: [[RET:.*:]] +; CHECK-NEXT: ret void +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_CALLBR:%.*]] = phi i1 [ true, %[[NOCALLBR]] ], [ [[C_INV]], [[TMP0:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_CALLBR]], label %[[CALLBR]], label %[[NOCALLBR]] +; + br i1 %c, label %nocallbr, label %callbr +nocallbr: + br label %callbr +callbr: + callbr void asm "", "!i"() to label %nocallbr [label %ret] +ret: + ret void +} + +; Fix an irreducible loop with two callbr blocks, one as header and one as regular block. +define void @callbr_header_and_regular(i1 %c) { +; CHECK-LABEL: define void @callbr_header_and_regular( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: br label %[[CALLBR_HEADER:.*]] +; CHECK: [[CALLBR_HEADER]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CALLBR_HEADER_TARGET_MID:.*]] [label %callbr_header.target.callbr_regular] +; CHECK: [[MID:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD:.*]], label %[[RET:.*]] +; CHECK: [[CALLBR_REGULAR:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CALLBR_HEADER]] [label %mid] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_HEADER_TARGET_MID]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR_HEADER_TARGET_CALLBR_REGULAR:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_CALLBR_REGULAR:%.*]] = phi i1 [ true, %[[MID]] ], [ false, %[[CALLBR_HEADER_TARGET_MID]] ], [ true, %[[CALLBR_HEADER_TARGET_CALLBR_REGULAR]] ] +; CHECK-NEXT: br i1 [[GUARD_CALLBR_REGULAR]], label %[[CALLBR_REGULAR]], label %[[MID]] +; + br label %callbr_header +callbr_header: + callbr void asm "", "!i"() to label %mid [label %callbr_regular] +mid: + br i1 %c, label %callbr_regular, label %ret +callbr_regular: + callbr void asm "", "!i"() to label %callbr_header [label %mid] +ret: + ret void +} + +; Fix an irreducible loop consisting only of callbr blocks (and ret). See the +; example at the top of FixIrreducible.cpp. 
+define void @callbr_only(i1 %c) { +; CHECK-LABEL: define void @callbr_only( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[CALLBR:.*:]] +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CALLBR_ENTRY_TARGET_CALLBR_HEADER:.*]] [label %callbr_entry.target.callbr_block] +; CHECK: [[CALLBR_HEADER:.*]]: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label %[[CALLBR_HEADER_TARGET_CALLBR_BLOCK:.*]] [] +; CHECK: [[CALLBR_BLOCK:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CALLBR_HEADER]] [label %ret] +; CHECK: [[RET:.*:]] +; CHECK-NEXT: ret void +; CHECK: [[CALLBR_HEADER_TARGET_CALLBR_BLOCK]]: +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[CALLBR_ENTRY_TARGET_CALLBR_HEADER]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CALLBR_ENTRY_TARGET_CALLBR_BLOCK:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_CALLBR_BLOCK:%.*]] = phi i1 [ true, %[[CALLBR_HEADER_TARGET_CALLBR_BLOCK]] ], [ false, %[[CALLBR_ENTRY_TARGET_CALLBR_HEADER]] ], [ true, %[[CALLBR_ENTRY_TARGET_CALLBR_BLOCK]] ] +; CHECK-NEXT: br i1 [[GUARD_CALLBR_BLOCK]], label %[[CALLBR_BLOCK]], label %[[CALLBR_HEADER]] +; +callbr_entry: + callbr void asm "", "!i"() to label %callbr_header [label %callbr_block] +callbr_header: + callbr void asm "", ""() to label %callbr_block [] +callbr_block: + callbr void asm "", "!i"() to label %callbr_header [label %ret] +ret: + ret void +} + +; Irreducible loop: entry leading to multiple callbr blocks. +define void @entry_multiple_callbr(i1 %a, i1 %b, i1 %c) { +; CHECK-LABEL: define void @entry_multiple_callbr( +; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 [[A]], label %[[CB1:.*]], label %[[IRR_GUARD:.*]] +; CHECK: [[CB1]]: +; CHECK-NEXT: callbr void asm "", "!i,!i"() +; CHECK-NEXT: to label %[[CB1_TARGET_BLOCK:.*]] [label %[[CB1_TARGET_CB2:.*]], label %cb1.target.block1] +; CHECK: [[BLOCK:.*]]: +; CHECK-NEXT: br i1 [[B]], label %[[IRR_GUARD]], label %[[BLOCK1:.*]] +; CHECK: [[CB2:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CB2_TARGET_BLOCK1:.*]] [label %cb2.target.block] +; CHECK: [[BLOCK1]]: +; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD2:.*]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; CHECK: [[CB1_TARGET_BLOCK]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CB1_TARGET_CB2]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[CB1_TARGET_BLOCK1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_CB2:%.*]] = phi i1 [ true, %[[BLOCK]] ], [ false, %[[CB1_TARGET_BLOCK]] ], [ true, %[[CB1_TARGET_CB2]] ], [ false, %[[CB1_TARGET_BLOCK1]] ], [ true, %[[ENTRY]] ] +; CHECK-NEXT: [[GUARD_BLOCK:%.*]] = phi i1 [ false, %[[BLOCK]] ], [ true, %[[CB1_TARGET_BLOCK]] ], [ false, %[[CB1_TARGET_CB2]] ], [ false, %[[CB1_TARGET_BLOCK1]] ], [ false, %[[ENTRY]] ] +; CHECK-NEXT: br i1 [[GUARD_CB2]], label %[[CB2]], label %[[IRR_GUARD1:.*]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[CB2_TARGET_BLOCK1]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[CB2_TARGET_BLOCK:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[IRR_GUARD2]]: +; CHECK-NEXT: [[GUARD_BLOCK3:%.*]] = phi i1 [ true, %[[BLOCK1]] ], [ [[GUARD_BLOCK]], %[[IRR_GUARD1]] ], [ false, %[[CB2_TARGET_BLOCK1]] ], [ true, %[[CB2_TARGET_BLOCK]] ] +; CHECK-NEXT: br i1 [[GUARD_BLOCK3]], label %[[BLOCK]], label %[[BLOCK1]] +; +entry: + br i1 %a, label %cb1, label 
%cb2 +cb1: + callbr void asm "", "!i,!i"() to label %block [label %cb2, label %block1] +block: + br i1 %b, label %cb2, label %block1 +cb2: + callbr void asm "", "!i"() to label %block1 [label %block] +block1: + br i1 %c, label %block, label %exit +exit: + ret void +} + +; Irreducible loop: callbr as loop exit, with multiple entries +define void @callbr_exit_with_separate_entries(i1 %a, i1 %b, i1 %c) { +; CHECK-LABEL: define void @callbr_exit_with_separate_entries( +; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true +; CHECK-NEXT: [[A_INV:%.*]] = xor i1 [[A]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[L1:.*]]: +; CHECK-NEXT: br i1 [[B]], label %[[CB:.*]], label %[[IRR_GUARD]] +; CHECK: [[L2:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD1:.*]] +; CHECK: [[CB]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[EXIT:.*]] [label %cb.target.l1] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_L2:%.*]] = phi i1 [ true, %[[L1]] ], [ [[A_INV]], %[[ENTRY]] ] +; CHECK-NEXT: br i1 [[GUARD_L2]], label %[[L2]], label %[[IRR_GUARD1]] +; CHECK: [[CB_TARGET_L1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD1]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: [[GUARD_L1:%.*]] = phi i1 [ true, %[[CB_TARGET_L1]] ], [ true, %[[IRR_GUARD]] ], [ [[C_INV]], %[[L2]] ] +; CHECK-NEXT: br i1 [[GUARD_L1]], label %[[L1]], label %[[CB]] +; +entry: + br i1 %a, label %l1, label %l2 +l1: + br i1 %b, label %cb, label %l2 +l2: + br i1 %c, label %cb, label %l1 +cb: + callbr void asm "", "!i"() to label %exit [label %l1] +exit: + ret void +} + +define void @callbr_exit_with_separate_entries1(i1 %a, i1 %b) { +; CHECK-LABEL: define void @callbr_exit_with_separate_entries1( +; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[A_INV:%.*]] = xor i1 [[A]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[LOOP1:.*]]: +; CHECK-NEXT: br i1 [[B]], label %[[CB:.*]], label %[[IRR_GUARD]] +; CHECK: [[LOOP2:.*]]: +; CHECK-NEXT: br label %[[LOOP1]] +; CHECK: [[CB]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[EXIT:.*]] [label %cb.target.loop2] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; CHECK: [[CB_TARGET_LOOP2:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_LOOP2:%.*]] = phi i1 [ true, %[[CB_TARGET_LOOP2]] ], [ true, %[[LOOP1]] ], [ [[A_INV]], %[[ENTRY]] ] +; CHECK-NEXT: br i1 [[GUARD_LOOP2]], label %[[LOOP2]], label %[[LOOP1]] +; +entry: + br i1 %a, label %loop1, label %loop2 +loop1: + br i1 %b, label %cb, label %loop2 +loop2: + br label %loop1 +cb: + callbr void asm "", "!i"() to label %exit [label %loop2] +exit: + ret void +} + +; Irreducible loop: all blocks are callbrs, with cross-edges +define void @callbr_only_multiple(i1 %a, i1 %b, i1 %c) { +; CHECK-LABEL: define void @callbr_only_multiple( +; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: callbr void asm "", "!i,!i"() +; CHECK-NEXT: to label %[[ENTRY_TARGET_CB1:.*]] [label %[[ENTRY_TARGET_CB2:.*]], label %entry.target.cb3] +; CHECK: [[CB1:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CB2:.*]] [label %cb1.target.cb3] +; CHECK: [[CB2]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[CB2_TARGET_CB3:.*]] [label %cb2.target.cb1] +; CHECK: [[CB3:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label 
%[[CB3_TARGET_CB1:.*]] [label %exit] +; CHECK: [[EXIT:.*:]] +; CHECK-NEXT: ret void +; CHECK: [[CB2_TARGET_CB3]]: +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[CB1_TARGET_CB3:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[ENTRY_TARGET_CB1]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[ENTRY_TARGET_CB2]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[ENTRY_TARGET_CB3:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_CB3:%.*]] = phi i1 [ true, %[[CB2_TARGET_CB3]] ], [ true, %[[CB1_TARGET_CB3]] ], [ false, %[[ENTRY_TARGET_CB1]] ], [ false, %[[ENTRY_TARGET_CB2]] ], [ true, %[[ENTRY_TARGET_CB3]] ] +; CHECK-NEXT: [[GUARD_CB1:%.*]] = phi i1 [ false, %[[CB2_TARGET_CB3]] ], [ false, %[[CB1_TARGET_CB3]] ], [ true, %[[ENTRY_TARGET_CB1]] ], [ false, %[[ENTRY_TARGET_CB2]] ], [ false, %[[ENTRY_TARGET_CB3]] ] +; CHECK-NEXT: br i1 [[GUARD_CB3]], label %[[CB3]], label %[[IRR_GUARD1:.*]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: br label %[[IRR_GUARD2:.*]] +; CHECK: [[CB2_TARGET_CB1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[CB3_TARGET_CB1]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[IRR_GUARD2]]: +; CHECK-NEXT: [[GUARD_CB13:%.*]] = phi i1 [ true, %[[CB2_TARGET_CB1]] ], [ [[GUARD_CB1]], %[[IRR_GUARD1]] ], [ true, %[[CB3_TARGET_CB1]] ] +; CHECK-NEXT: br i1 [[GUARD_CB13]], label %[[CB1]], label %[[CB2]] +; +entry: + callbr void asm "", "!i,!i"() to label %cb1 [label %cb2, label %cb3] +cb1: + callbr void asm "", "!i"() to label %cb2 [label %cb3] +cb2: + callbr void asm "", "!i"() to label %cb3 [label %cb1] +cb3: + callbr void asm "", "!i"() to label %cb1 [label %exit] +exit: + ret void +} + +; Irreducible loop: callbr as a "bypass" block +define void @callbr_bypass(i1 %a, i1 %b, i1 %c) { +; CHECK-LABEL: define void @callbr_bypass( +; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[B_INV:%.*]] = xor i1 [[B]], true +; CHECK-NEXT: [[A_INV:%.*]] = xor i1 [[A]], true +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[CB:.*]]: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[L2:.*]] [label %cb.target.l1] +; CHECK: [[L1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD1:.*]] +; CHECK: [[L2]]: +; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD1]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; CHECK: [[CB_TARGET_L1:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_L1:%.*]] = phi i1 [ true, %[[CB_TARGET_L1]] ], [ [[A_INV]], %[[ENTRY]] ] +; CHECK-NEXT: br i1 [[GUARD_L1]], label %[[L1]], label %[[IRR_GUARD1]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: [[GUARD_CB:%.*]] = phi i1 [ true, %[[L2]] ], [ true, %[[IRR_GUARD]] ], [ [[B_INV]], %[[L1]] ] +; CHECK-NEXT: br i1 [[GUARD_CB]], label %[[CB]], label %[[L2]] +; +entry: + br i1 %a, label %cb, label %l1 +cb: + callbr void asm "", "!i"() to label %l2 [label %l1] +l1: + br i1 %b, label %l2, label %cb +l2: + br i1 %c, label %cb, label %exit +exit: + ret void +} + +; Irreducible loop: callbr with multiple indirect targets, some looping, some exiting +define void @callbr_multiple_with_exit(i1 %a, i1 %b, i1 %c) { +; CHECK-LABEL: define void @callbr_multiple_with_exit( +; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: callbr void asm "", "!i,!i,!i"() +; CHECK-NEXT: to label %[[ENTRY_TARGET_L1:.*]] [label %[[ENTRY_TARGET_L2:.*]], label %[[EXIT:.*]], label %entry.target.l3] +; CHECK: [[L1:.*]]: +; CHECK-NEXT: br 
i1 [[A]], label %[[L2:.*]], label %[[IRR_GUARD:.*]] +; CHECK: [[L2]]: +; CHECK-NEXT: br i1 [[B]], label %[[IRR_GUARD2:.*]], label %[[EXIT]] +; CHECK: [[L3:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD2]], label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; CHECK: [[ENTRY_TARGET_L1]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[ENTRY_TARGET_L2]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[ENTRY_TARGET_L3:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_L3:%.*]] = phi i1 [ true, %[[L1]] ], [ false, %[[ENTRY_TARGET_L1]] ], [ false, %[[ENTRY_TARGET_L2]] ], [ true, %[[ENTRY_TARGET_L3]] ] +; CHECK-NEXT: [[GUARD_L1:%.*]] = phi i1 [ false, %[[L1]] ], [ true, %[[ENTRY_TARGET_L1]] ], [ false, %[[ENTRY_TARGET_L2]] ], [ false, %[[ENTRY_TARGET_L3]] ] +; CHECK-NEXT: br i1 [[GUARD_L3]], label %[[L3]], label %[[IRR_GUARD1:.*]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: br label %[[IRR_GUARD2]] +; CHECK: [[IRR_GUARD2]]: +; CHECK-NEXT: [[GUARD_L13:%.*]] = phi i1 [ true, %[[L2]] ], [ [[GUARD_L1]], %[[IRR_GUARD1]] ], [ true, %[[L3]] ] +; CHECK-NEXT: br i1 [[GUARD_L13]], label %[[L1]], label %[[L2]] +; +entry: + callbr void asm "", "!i,!i,!i"() to label %l1 [label %l2, label %exit, label %l3] +l1: + br i1 %a, label %l2, label %l3 +l2: + br i1 %b, label %l1, label %exit +l3: + br i1 %c, label %l1, label %exit +exit: + ret void +} + +define void @callbr_nested(i1 %c, i1 %d) { +; CHECK-LABEL: define void @callbr_nested( +; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label %[[ENTRY_TARGET_H:.*]] [label %entry.target.b] +; CHECK: [[H:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD1:.*]] +; CHECK: [[B:.*]]: +; CHECK-NEXT: callbr void asm "", "!i,!i"() +; CHECK-NEXT: to label %[[H]] [label %[[B_TARGET_BH:.*]], label %b.target.bb] +; CHECK: [[BH:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD:.*]] +; CHECK: [[BB:.*]]: +; CHECK-NEXT: br i1 [[C]], label %[[BH]], label %[[RET:.*]] +; CHECK: [[RET]]: +; CHECK-NEXT: ret void +; CHECK: [[B_TARGET_BH]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[B_TARGET_BB:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD]] +; CHECK: [[IRR_GUARD]]: +; CHECK-NEXT: [[GUARD_BB:%.*]] = phi i1 [ true, %[[BH]] ], [ false, %[[B_TARGET_BH]] ], [ true, %[[B_TARGET_BB]] ] +; CHECK-NEXT: br i1 [[GUARD_BB]], label %[[BB]], label %[[BH]] +; CHECK: [[ENTRY_TARGET_H]]: +; CHECK-NEXT: br label %[[IRR_GUARD1]] +; CHECK: [[ENTRY_TARGET_B:.*]]: +; CHECK-NEXT: br label %[[IRR_GUARD1]] +; CHECK: [[IRR_GUARD1]]: +; CHECK-NEXT: [[GUARD_B:%.*]] = phi i1 [ true, %[[H]] ], [ false, %[[ENTRY_TARGET_H]] ], [ true, %[[ENTRY_TARGET_B]] ] +; CHECK-NEXT: br i1 [[GUARD_B]], label %[[B]], label %[[H]] +; +entry: + callbr void asm "","!i"() to label %h [label %b] +h: + br label %b +b: + callbr void asm "","!i,!i"() to label %h [label %bh, label %bb] +bh: + br label %bb +bb: + br i1 %c, label %bh, label %ret +ret: + ret void +} + +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; LOOPS-AFTER: {{.*}} +; LOOPS-BEFORE: {{.*}} diff --git a/llvm/test/Transforms/FixIrreducible/nested.ll b/llvm/test/Transforms/FixIrreducible/nested.ll index 0cc6b47..c9161cc1 100644 --- a/llvm/test/Transforms/FixIrreducible/nested.ll +++ b/llvm/test/Transforms/FixIrreducible/nested.ll @@ -50,6 +50,69 @@ exit: ret void } +define void @nested_irr_top_level_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5) { +; CHECK-LABEL: @nested_irr_top_level_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[ENTRY_TARGET_A1:%.*]] [label %entry.target.A2] +; CHECK: A1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[A1_TARGET_B1:%.*]] [label %A1.target.B2] +; CHECK: B1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[B1_TARGET_B2:%.*]] [label %A3] +; CHECK: B2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]]) +; CHECK-NEXT: to label [[B1:%.*]] [label %A3] +; CHECK: A3: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]]) +; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %exit] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]]) +; CHECK-NEXT: to label [[A1:%.*]] [label %exit] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A3.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: entry.target.A1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: entry.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[ENTRY_TARGET_A1]] ], [ true, [[ENTRY_TARGET_A2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]] +; CHECK: B1.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1:%.*]] +; CHECK: A1.target.B1: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: A1.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: irr.guard1: +; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[B1_TARGET_B2]] ], [ false, [[A1_TARGET_B1]] ], [ true, [[A1_TARGET_B2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]] +; +entry: + callbr void asm "", "r,!i"(i1 %Pred0) to label %A1 [label %A2] + +A1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %B1 [label %B2] + +B1: + callbr void asm "", "r,!i"(i1 %Pred2) to label %B2 [label %A3] + +B2: + callbr void asm "", "r,!i"(i1 %Pred3) to label %B1 [label %A3] + +A3: + callbr void asm "", "r,!i"(i1 %Pred4) to label %A2 [label %exit] + +A2: + callbr void asm "", "r,!i"(i1 %Pred5) to label %A1 [label %exit] + +exit: + ret void +} + define void @nested_irr_in_loop(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) { ; CHECK-LABEL: @nested_irr_in_loop( ; CHECK-NEXT: entry: @@ -107,6 +170,80 @@ exit: ret void } +define void @nested_irr_in_loop_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) { +; CHECK-LABEL: @nested_irr_in_loop_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[H1:%.*]] +; CHECK: H1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[H1_TARGET_A1:%.*]] [label %H1.target.A2] +; CHECK: A1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[A1_TARGET_B1:%.*]] [label %A1.target.B2] +; CHECK: B1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[B1_TARGET_B2:%.*]] [label %A3] +; CHECK: B2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 
[[PRED3:%.*]]) +; CHECK-NEXT: to label [[B1:%.*]] [label %A3] +; CHECK: A3: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]]) +; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %L1] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]]) +; CHECK-NEXT: to label [[A1:%.*]] [label %L1] +; CHECK: L1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED6:%.*]]) +; CHECK-NEXT: to label [[EXIT:%.*]] [label %H1] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A3.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: H1.target.A1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: H1.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[H1_TARGET_A1]] ], [ true, [[H1_TARGET_A2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]] +; CHECK: B1.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1:%.*]] +; CHECK: A1.target.B1: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: A1.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: irr.guard1: +; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[B1_TARGET_B2]] ], [ false, [[A1_TARGET_B1]] ], [ true, [[A1_TARGET_B2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]] +; +entry: + br label %H1 + +H1: + callbr void asm "", "r,!i"(i1 %Pred0) to label %A1 [label %A2] + +A1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %B1 [label %B2] + +B1: + callbr void asm "", "r,!i"(i1 %Pred2) to label %B2 [label %A3] + +B2: + callbr void asm "", "r,!i"(i1 %Pred3) to label %B1 [label %A3] + +A3: + callbr void asm "", "r,!i"(i1 %Pred4) to label %A2 [label %L1] + +A2: + callbr void asm "", "r,!i"(i1 %Pred5) to label %A1 [label %L1] + +L1: + callbr void asm "", "r,!i"(i1 %Pred6) to label %exit [label %H1] + +exit: + ret void +} + define void @loop_in_irr(i1 %Pred0, i1 %Pred1, i1 %Pred2) { ; CHECK-LABEL: @loop_in_irr( ; CHECK-NEXT: entry: @@ -150,6 +287,60 @@ exit: ret void } +define void @loop_in_irr_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2) { +; CHECK-LABEL: @loop_in_irr_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[ENTRY_TARGET_A1:%.*]] [label %entry.target.A2] +; CHECK: A1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[H1:%.*]] [] +; CHECK: H1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[L1:%.*]] [] +; CHECK: L1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[H1]] [label %A3] +; CHECK: A3: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %exit] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A1:%.*]] [] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A3.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: entry.target.A1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: entry.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[ENTRY_TARGET_A1]] ], [ true, [[ENTRY_TARGET_A2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]] +; +entry: + callbr void asm "", "r,!i"(i1 %Pred0) to label %A1 [label %A2] + +A1: + callbr void asm "", ""() to label %H1 [] + +H1: + callbr void asm "", ""() to label %L1 [] + +L1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %H1 [label %A3] + +A3: + callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 
[label %exit] + +A2: + callbr void asm "", ""() to label %A1 [] + +exit: + ret void +} + define void @loop_in_irr_shared_entry(i1 %Pred0, i1 %Pred1, i1 %Pred2) { ; CHECK-LABEL: @loop_in_irr_shared_entry( ; CHECK-NEXT: entry: @@ -188,6 +379,54 @@ exit: ret void } +define void @loop_in_irr_shared_entry_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2) { +; CHECK-LABEL: @loop_in_irr_shared_entry_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[ENTRY_TARGET_H1:%.*]] [label %entry.target.A2] +; CHECK: H1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[L1:%.*]] [] +; CHECK: L1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[H1:%.*]] [label %A3] +; CHECK: A3: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %exit] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[H1]] [] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A3.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: entry.target.H1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: entry.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[ENTRY_TARGET_H1]] ], [ true, [[ENTRY_TARGET_A2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[H1]] +; +entry: + callbr void asm "", "r,!i"(i1 %Pred0) to label %H1 [label %A2] + +H1: + callbr void asm "", ""() to label %L1 [] + +L1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %H1 [label %A3] + +A3: + callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 [label %exit] + +A2: + callbr void asm "", ""() to label %H1 [] + +exit: + ret void +} + define void @loop_in_irr_shared_header(i1 %Pred0, i1 %Pred1, i1 %Pred2) { ; CHECK-LABEL: @loop_in_irr_shared_header( ; CHECK-NEXT: entry: @@ -226,6 +465,56 @@ exit: ret void } +define void @loop_in_irr_shared_header_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2) { +; CHECK-LABEL: @loop_in_irr_shared_header_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[ENTRY_TARGET_A2:%.*]] [label %entry.target.H1] +; CHECK: H1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[L1:%.*]] [] +; CHECK: L1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[L1_TARGET_H1:%.*]] [label %A3] +; CHECK: A3: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[A2:%.*]] [label %exit] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A2_TARGET_H1:%.*]] [] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A2.target.H1: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: L1.target.H1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: entry.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: entry.target.H1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_H1:%.*]] = phi i1 [ true, [[A2_TARGET_H1]] ], [ true, [[L1_TARGET_H1]] ], [ false, [[ENTRY_TARGET_A2]] ], [ true, [[ENTRY_TARGET_H1:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_H1]], label [[H1:%.*]], label [[A2]] +; +entry: + callbr void asm "", "r,!i"(i1 %Pred0) to label %A2 [label %H1] + +H1: + callbr void asm "", ""() to label %L1 [] + +L1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %H1 [label %A3] + +A3: + callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 [label %exit] + +A2: + callbr void asm "", ""() 
to label %H1 [] + +exit: + ret void +} + define void @loop_irr_loop_shared_header(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3) { ; CHECK-LABEL: @loop_irr_loop_shared_header( ; CHECK-NEXT: entry: @@ -269,6 +558,62 @@ exit: ret void } +define void @loop_irr_loop_shared_header_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3) { +; CHECK-LABEL: @loop_irr_loop_shared_header_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[H2:%.*]] [] +; CHECK: H2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[H2_TARGET_A2:%.*]] [label %H2.target.H1] +; CHECK: H1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[A3:%.*]] [label %H1.target.H1] +; CHECK: A3: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[A2:%.*]] [label %L2] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A2_TARGET_H1:%.*]] [] +; CHECK: L2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]]) +; CHECK-NEXT: to label [[H2]] [label %exit] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A2.target.H1: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: H1.target.H1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: H2.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: H2.target.H1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_H1:%.*]] = phi i1 [ true, [[A2_TARGET_H1]] ], [ true, [[H1_TARGET_H1:%.*]] ], [ false, [[H2_TARGET_A2]] ], [ true, [[H2_TARGET_H1:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_H1]], label [[H1:%.*]], label [[A2]] +; +entry: + callbr void asm "", ""() to label %H2 [] + +H2: + callbr void asm "", "r,!i"(i1 %Pred0) to label %A2 [label %H1] + +H1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %A3 [label %H1] + +A3: + callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 [label %L2] + +A2: + callbr void asm "", ""() to label %H1 [] + +L2: + callbr void asm "", "r,!i"(i1 %Pred3) to label %H2 [label %exit] + +exit: + ret void +} + define void @siblings_top_level(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) { ; CHECK-LABEL: @siblings_top_level( ; CHECK-NEXT: entry: @@ -336,6 +681,93 @@ exit: ret void } +define void @siblings_top_level_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) { +; CHECK-LABEL: @siblings_top_level_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[H1:%.*]] [label %fork1] +; CHECK: H1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[H1_TARGET_A1:%.*]] [label %H1.target.A2] +; CHECK: A1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A1_TARGET_A2:%.*]] [] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[A1:%.*]] [label %L1] +; CHECK: L1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]]) +; CHECK-NEXT: to label [[H1]] [label %exit] +; CHECK: fork1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]]) +; CHECK-NEXT: to label [[FORK1_TARGET_B1:%.*]] [label %fork1.target.B2] +; CHECK: B1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[H2:%.*]] [] +; CHECK: H2: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[L2:%.*]] [] +; CHECK: L2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]]) +; CHECK-NEXT: to label [[H2]] [label %L2.target.B2] +; CHECK: B2: +; CHECK-NEXT: callbr void asm "", 
"r,!i"(i1 [[PRED6:%.*]]) +; CHECK-NEXT: to label [[B1:%.*]] [label %exit] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A1.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: H1.target.A1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: H1.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A1_TARGET_A2]] ], [ false, [[H1_TARGET_A1]] ], [ true, [[H1_TARGET_A2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]] +; CHECK: L2.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1:%.*]] +; CHECK: fork1.target.B1: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: fork1.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: irr.guard1: +; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[L2_TARGET_B2:%.*]] ], [ false, [[FORK1_TARGET_B1]] ], [ true, [[FORK1_TARGET_B2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]] +; +entry: + callbr void asm "", "r,!i"(i1 %Pred0) to label %H1 [label %fork1] + +H1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %A1 [label %A2] + +A1: + callbr void asm "", ""() to label %A2 [] + +A2: + callbr void asm "", "r,!i"(i1 %Pred2) to label %A1 [label %L1] + +L1: + callbr void asm "", "r,!i"(i1 %Pred3) to label %H1 [label %exit] + +fork1: + callbr void asm "", "r,!i"(i1 %Pred4) to label %B1 [label %B2] + +B1: + callbr void asm "", ""() to label %H2 [] + +H2: + callbr void asm "", ""() to label %L2 [] + +L2: + callbr void asm "", "r,!i"(i1 %Pred5) to label %H2 [label %B2] + +B2: + callbr void asm "", "r,!i"(i1 %Pred6) to label %B1 [label %exit] + +exit: + ret void +} + define void @siblings_in_loop(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7) { ; CHECK-LABEL: @siblings_in_loop( ; CHECK-NEXT: entry: @@ -413,6 +845,105 @@ exit: ret void } +define void @siblings_in_loop_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7) { +; CHECK-LABEL: @siblings_in_loop_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[H0:%.*]] [] +; CHECK: H0: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[H1:%.*]] [label %fork1] +; CHECK: H1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[H1_TARGET_A1:%.*]] [label %H1.target.A2] +; CHECK: A1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A1_TARGET_A2:%.*]] [] +; CHECK: A2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[A1:%.*]] [label %L1] +; CHECK: L1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]]) +; CHECK-NEXT: to label [[H1]] [label %L0] +; CHECK: fork1: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]]) +; CHECK-NEXT: to label [[FORK1_TARGET_B1:%.*]] [label %fork1.target.B2] +; CHECK: B1: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[H2:%.*]] [] +; CHECK: H2: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[L2:%.*]] [] +; CHECK: L2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]]) +; CHECK-NEXT: to label [[H2]] [label %L2.target.B2] +; CHECK: B2: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED6:%.*]]) +; CHECK-NEXT: to label [[B1:%.*]] [label %L0] +; CHECK: L0: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED7:%.*]]) +; CHECK-NEXT: to label [[EXIT:%.*]] [label %H0] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A1.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: 
H1.target.A1: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: H1.target.A2: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A1_TARGET_A2]] ], [ false, [[H1_TARGET_A1]] ], [ true, [[H1_TARGET_A2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]] +; CHECK: L2.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1:%.*]] +; CHECK: fork1.target.B1: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: fork1.target.B2: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: irr.guard1: +; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[L2_TARGET_B2:%.*]] ], [ false, [[FORK1_TARGET_B1]] ], [ true, [[FORK1_TARGET_B2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]] +; +entry: + callbr void asm "", ""() to label %H0 [] + +H0: + callbr void asm "", "r,!i"(i1 %Pred0) to label %H1 [label %fork1] + +H1: + callbr void asm "", "r,!i"(i1 %Pred1) to label %A1 [label %A2] + +A1: + callbr void asm "", ""() to label %A2 [] + +A2: + callbr void asm "", "r,!i"(i1 %Pred2) to label %A1 [label %L1] + +L1: + callbr void asm "", "r,!i"(i1 %Pred3) to label %H1 [label %L0] + +fork1: + callbr void asm "", "r,!i"(i1 %Pred4) to label %B1 [label %B2] + +B1: + callbr void asm "", ""() to label %H2 [] + +H2: + callbr void asm "", ""() to label %L2 [] + +L2: + callbr void asm "", "r,!i"(i1 %Pred5) to label %H2 [label %B2] + +B2: + callbr void asm "", "r,!i"(i1 %Pred6) to label %B1 [label %L0] + +L0: + callbr void asm "", "r,!i"(i1 %Pred7) to label %exit [label %H0] + +exit: + ret void +} + define void @irr_in_irr_shared_entry(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7, i1 %Pred8, i1 %Pred9, i1 %Pred10, i1 %Pred11, i1 %Pred12, i1 %Pred13) { ; CHECK-LABEL: @irr_in_irr_shared_entry( ; CHECK-NEXT: entry: @@ -527,3 +1058,148 @@ if.end8.i: exit: ret void } + +define void @irr_in_irr_shared_entry_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7, i1 %Pred8, i1 %Pred9, i1 %Pred10, i1 %Pred11, i1 %Pred12, i1 %Pred13) { +; CHECK-LABEL: @irr_in_irr_shared_entry_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]]) +; CHECK-NEXT: to label [[IF_END:%.*]] [label %if.then] +; CHECK: if.end: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]]) +; CHECK-NEXT: to label [[IF_THEN7:%.*]] [label %if.else] +; CHECK: if.then7: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[IF_END16:%.*]] [] +; CHECK: if.else: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[IF_END16]] [] +; CHECK: if.end16: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]]) +; CHECK-NEXT: to label [[WHILE_COND_PREHEADER:%.*]] [label %if.then39] +; CHECK: while.cond.preheader: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[WHILE_COND:%.*]] [] +; CHECK: while.cond: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]]) +; CHECK-NEXT: to label [[WHILE_COND_TARGET_COND_TRUE49:%.*]] [label %lor.rhs] +; CHECK: cond.true49: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]]) +; CHECK-NEXT: to label [[IF_THEN69:%.*]] [label %cond.true49.target.while.body63] +; CHECK: while.body63: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]]) +; CHECK-NEXT: to label [[EXIT:%.*]] [label %while.cond47] +; CHECK: while.cond47: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED6:%.*]]) +; CHECK-NEXT: to label [[COND_TRUE49:%.*]] [label %while.cond47.target.cond.end61] +; CHECK: cond.end61: +; 
CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED7:%.*]]) +; CHECK-NEXT: to label [[COND_END61_TARGET_WHILE_BODY63:%.*]] [label %while.cond] +; CHECK: if.then69: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED8:%.*]]) +; CHECK-NEXT: to label [[EXIT]] [label %while.cond] +; CHECK: lor.rhs: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED9:%.*]]) +; CHECK-NEXT: to label [[LOR_RHS_TARGET_COND_END61:%.*]] [label %while.end76] +; CHECK: while.end76: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[EXIT]] [] +; CHECK: if.then39: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED10:%.*]]) +; CHECK-NEXT: to label [[EXIT]] [label %if.end.i145] +; CHECK: if.end.i145: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED11:%.*]]) +; CHECK-NEXT: to label [[EXIT]] [label %if.end8.i149] +; CHECK: if.end8.i149: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[EXIT]] [] +; CHECK: if.then: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED12:%.*]]) +; CHECK-NEXT: to label [[EXIT]] [label %if.end.i] +; CHECK: if.end.i: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED13:%.*]]) +; CHECK-NEXT: to label [[EXIT]] [label %if.end8.i] +; CHECK: if.end8.i: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[EXIT]] [] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: while.cond47.target.cond.end61: +; CHECK-NEXT: br label [[IRR_GUARD:%.*]] +; CHECK: lor.rhs.target.cond.end61: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: while.cond.target.cond.true49: +; CHECK-NEXT: br label [[IRR_GUARD]] +; CHECK: irr.guard: +; CHECK-NEXT: [[GUARD_COND_END61:%.*]] = phi i1 [ true, [[WHILE_COND47_TARGET_COND_END61:%.*]] ], [ true, [[LOR_RHS_TARGET_COND_END61]] ], [ false, [[WHILE_COND_TARGET_COND_TRUE49]] ] +; CHECK-NEXT: br i1 [[GUARD_COND_END61]], label [[COND_END61:%.*]], label [[IRR_GUARD1:%.*]] +; CHECK: cond.true49.target.while.body63: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: cond.end61.target.while.body63: +; CHECK-NEXT: br label [[IRR_GUARD1]] +; CHECK: irr.guard1: +; CHECK-NEXT: [[GUARD_WHILE_BODY63:%.*]] = phi i1 [ true, [[COND_TRUE49_TARGET_WHILE_BODY63:%.*]] ], [ true, [[COND_END61_TARGET_WHILE_BODY63]] ], [ false, [[IRR_GUARD]] ] +; CHECK-NEXT: br i1 [[GUARD_WHILE_BODY63]], label [[WHILE_BODY63:%.*]], label [[COND_TRUE49]] +; +entry: + callbr void asm "", "r,!i"(i1 %Pred0) to label %if.end [label %if.then] + +if.end: + callbr void asm "", "r,!i"(i1 %Pred1) to label %if.then7 [label %if.else] + +if.then7: + callbr void asm "", ""() to label %if.end16 [] + +if.else: + callbr void asm "", ""() to label %if.end16 [] + +if.end16: + callbr void asm "", "r,!i"(i1 %Pred2) to label %while.cond.preheader [label %if.then39] + +while.cond.preheader: + callbr void asm "", ""() to label %while.cond [] + +while.cond: + callbr void asm "", "r,!i"(i1 %Pred3) to label %cond.true49 [label %lor.rhs] + +cond.true49: + callbr void asm "", "r,!i"(i1 %Pred4) to label %if.then69 [label %while.body63] + +while.body63: + callbr void asm "", "r,!i"(i1 %Pred5) to label %exit [label %while.cond47] + +while.cond47: + callbr void asm "", "r,!i"(i1 %Pred6) to label %cond.true49 [label %cond.end61] + +cond.end61: + callbr void asm "", "r,!i"(i1 %Pred7) to label %while.body63 [label %while.cond] + +if.then69: + callbr void asm "", "r,!i"(i1 %Pred8) to label %exit [label %while.cond] + +lor.rhs: + callbr void asm "", "r,!i"(i1 %Pred9) to label %cond.end61 [label %while.end76] + +while.end76: + callbr void asm "", ""() to label %exit [] + +if.then39: + callbr void asm "", "r,!i"(i1 %Pred10) 
to label %exit [label %if.end.i145] + +if.end.i145: + callbr void asm "", "r,!i"(i1 %Pred11) to label %exit [label %if.end8.i149] + +if.end8.i149: + callbr void asm "", ""() to label %exit [] + +if.then: + callbr void asm "", "r,!i"(i1 %Pred12) to label %exit [label %if.end.i] + +if.end.i: + callbr void asm "", "r,!i"(i1 %Pred13) to label %exit [label %if.end8.i] + +if.end8.i: + callbr void asm "", ""() to label %exit [] + +exit: + ret void +} diff --git a/llvm/test/Transforms/FixIrreducible/unreachable.ll b/llvm/test/Transforms/FixIrreducible/unreachable.ll index defbefb..845cf50 100644 --- a/llvm/test/Transforms/FixIrreducible/unreachable.ll +++ b/llvm/test/Transforms/FixIrreducible/unreachable.ll @@ -25,3 +25,26 @@ loop.latch: loop.exit: ret void } + +; CHECK-LABEL: @unreachable_callbr( +; CHECK: entry: +; CHECK-NOT: irr.guard: +define void @unreachable_callbr(i32 %n, i1 %arg) { +entry: + callbr void asm "", ""() to label %loop.body [] + +loop.body: + callbr void asm "", ""() to label %inner.block [] + +unreachable.block: + callbr void asm "", ""() to label %inner.block [] + +inner.block: + callbr void asm "", "r,!i"(i1 %arg) to label %loop.exit [label %loop.latch] + +loop.latch: + callbr void asm "", ""() to label %loop.body [] + +loop.exit: + ret void +} diff --git a/llvm/test/Transforms/GVN/assume-equal.ll b/llvm/test/Transforms/GVN/assume-equal.ll index bbbc5c5..a389801 100644 --- a/llvm/test/Transforms/GVN/assume-equal.ll +++ b/llvm/test/Transforms/GVN/assume-equal.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt < %s -passes=gvn -S | FileCheck %s +target datalayout = "p1:64:64:64:32" + %struct.A = type { ptr } @_ZTV1A = available_externally unnamed_addr constant [4 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A3fooEv, ptr @_ZN1A3barEv], align 8 @_ZTI1A = external constant ptr @@ -372,6 +374,20 @@ define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp(ptr %p, ptr %p2) { ret i1 %c } +define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp_addrsize(ptr addrspace(1) %p, ptr addrspace(1) %p2) { +; CHECK-LABEL: define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp_addrsize( +; CHECK-SAME: ptr addrspace(1) [[P:%.*]], ptr addrspace(1) [[P2:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[P]], [[P2]] +; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) +; CHECK-NEXT: [[C:%.*]] = icmp eq ptr addrspace(1) [[P]], null +; CHECK-NEXT: ret i1 [[C]] +; + %cmp = icmp eq ptr addrspace(1) %p, %p2 + call void @llvm.assume(i1 %cmp) + %c = icmp eq ptr addrspace(1) %p2, null + ret i1 %c +} + ; This is not correct, as it may change the provenance exposed by ptrtoint. ; We still allow it for now. 
define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint(ptr %p, ptr %p2) { @@ -388,6 +404,20 @@ define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint(ptr %p, ptr %p ret i64 %int } +define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint_addrsize(ptr addrspace(1) %p, ptr addrspace(1) %p2) { +; CHECK-LABEL: define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint_addrsize( +; CHECK-SAME: ptr addrspace(1) [[P:%.*]], ptr addrspace(1) [[P2:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[P]], [[P2]] +; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) +; CHECK-NEXT: [[INT:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64 +; CHECK-NEXT: ret i64 [[INT]] +; + %cmp = icmp eq ptr addrspace(1) %p, %p2 + call void @llvm.assume(i1 %cmp) + %int = ptrtoint ptr addrspace(1) %p2 to i64 + ret i64 %int +} + define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr(ptr %p, ptr %p2) { ; CHECK-LABEL: define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr( ; CHECK-SAME: ptr [[P:%.*]], ptr [[P2:%.*]]) { @@ -402,6 +432,20 @@ define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr(ptr %p, ptr % ret i64 %int } +define i32 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr_addrsize(ptr addrspace(1) %p, ptr addrspace(1) %p2) { +; CHECK-LABEL: define i32 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr_addrsize( +; CHECK-SAME: ptr addrspace(1) [[P:%.*]], ptr addrspace(1) [[P2:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[P]], [[P2]] +; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) +; CHECK-NEXT: [[INT:%.*]] = ptrtoaddr ptr addrspace(1) [[P]] to i32 +; CHECK-NEXT: ret i32 [[INT]] +; + %cmp = icmp eq ptr addrspace(1) %p, %p2 + call void @llvm.assume(i1 %cmp) + %int = ptrtoaddr ptr addrspace(1) %p2 to i32 + ret i32 %int +} + define i8 @assume_ptr_eq_same_prov(ptr %p, i64 %x) { ; CHECK-LABEL: define i8 @assume_ptr_eq_same_prov( ; CHECK-SAME: ptr [[P:%.*]], i64 [[X:%.*]]) { diff --git a/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll b/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll index 08dcf1d..8e932e0 100644 --- a/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll +++ b/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll @@ -7,11 +7,11 @@ define void @f(ptr addrspace(7) %arg) { ; CHECK-LABEL: define void @f ; CHECK-SAME: (ptr addrspace(7) [[ARG:%.*]]) { ; CHECK-NEXT: bb: +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(7) [[ARG]], i32 8 ; CHECK-NEXT: br label [[BB1:%.*]] ; CHECK: bb1: ; CHECK-NEXT: br i1 false, label [[BB2:%.*]], label [[BB1]] ; CHECK: bb2: -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(7) [[ARG]], i32 8 ; CHECK-NEXT: br label [[BB3:%.*]] ; CHECK: bb3: ; CHECK-NEXT: [[I4:%.*]] = load i32, ptr addrspace(7) [[SCEVGEP]], align 4 diff --git a/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll b/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll index 2003b1a..3c6535d 100644 --- a/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll +++ b/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll @@ -4,33 +4,31 @@ define i32 @remove_loop(i32 %size) #0 { ; CHECK-V8M-LABEL: @remove_loop( -; CHECK-V8M-SAME: i32 [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-V8M-NEXT: entry: -; CHECK-V8M-NEXT: br label %[[WHILE_COND:.*]] -; CHECK-V8M: while.cond: -; CHECK-V8M-NEXT: br i1 false, label %[[WHILE_COND]], label %[[WHILE_END:.*]] -; CHECK-V8M: while.end: -; CHECK-V8M-NEXT: [[TMP0:%.*]] = add i32 
[[SIZE]], 31 +; CHECK-V8M-NEXT: [[TMP0:%.*]] = add i32 [[SIZE:%.*]], 31 ; CHECK-V8M-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SIZE]], i32 31) ; CHECK-V8M-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[UMIN]] ; CHECK-V8M-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 5 ; CHECK-V8M-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 5 ; CHECK-V8M-NEXT: [[TMP4:%.*]] = sub i32 [[SIZE]], [[TMP3]] +; CHECK-V8M-NEXT: br label [[WHILE_COND:%.*]] +; CHECK-V8M: while.cond: +; CHECK-V8M-NEXT: br i1 false, label [[WHILE_COND]], label [[WHILE_END:%.*]] +; CHECK-V8M: while.end: ; CHECK-V8M-NEXT: ret i32 [[TMP4]] ; ; CHECK-V8A-LABEL: @remove_loop( -; CHECK-V8A-SAME: i32 [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-V8A-NEXT: entry: -; CHECK-V8A-NEXT: br label %[[WHILE_COND:.*]] -; CHECK-V8A: while.cond: -; CHECK-V8A-NEXT: br i1 false, label %[[WHILE_COND]], label %[[WHILE_END:.*]] -; CHECK-V8A: while.end: -; CHECK-V8A-NEXT: [[TMP0:%.*]] = add i32 [[SIZE]], 31 +; CHECK-V8A-NEXT: [[TMP0:%.*]] = add i32 [[SIZE:%.*]], 31 ; CHECK-V8A-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SIZE]], i32 31) ; CHECK-V8A-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[UMIN]] ; CHECK-V8A-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 5 ; CHECK-V8A-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 5 ; CHECK-V8A-NEXT: [[TMP4:%.*]] = sub i32 [[SIZE]], [[TMP3]] +; CHECK-V8A-NEXT: br label [[WHILE_COND:%.*]] +; CHECK-V8A: while.cond: +; CHECK-V8A-NEXT: br i1 false, label [[WHILE_COND]], label [[WHILE_END:%.*]] +; CHECK-V8A: while.end: ; CHECK-V8A-NEXT: ret i32 [[TMP4]] ; entry: diff --git a/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll b/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll index 2261423..382f026 100644 --- a/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll +++ b/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll @@ -77,6 +77,8 @@ define dso_local arm_aapcscc void @test(ptr nocapture %pDest, ptr nocapture read ; CHECK-NEXT: [[CMP2780:%.*]] = icmp ugt i32 [[ADD25]], [[J_0_LCSSA]] ; CHECK-NEXT: br i1 [[CMP2780]], label [[FOR_BODY29_PREHEADER:%.*]], label [[FOR_END40]] ; CHECK: for.body29.preheader: +; CHECK-NEXT: [[TMP10:%.*]] = sub nsw i32 [[ADD25]], [[J_0_LCSSA]] +; CHECK-NEXT: [[SCEVGEP93:%.*]] = getelementptr i16, ptr [[PSRCB_ADDR_1_LCSSA]], i32 [[TMP10]] ; CHECK-NEXT: br label [[FOR_BODY29:%.*]] ; CHECK: for.body29: ; CHECK-NEXT: [[J_184:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY29]] ], [ [[J_0_LCSSA]], [[FOR_BODY29_PREHEADER]] ] @@ -100,8 +102,6 @@ define dso_local arm_aapcscc void @test(ptr nocapture %pDest, ptr nocapture read ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[ADD25]] ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END40_LOOPEXIT:%.*]], label [[FOR_BODY29]] ; CHECK: for.end40.loopexit: -; CHECK-NEXT: [[TMP10:%.*]] = sub nsw i32 [[ADD25]], [[J_0_LCSSA]] -; CHECK-NEXT: [[SCEVGEP93:%.*]] = getelementptr i16, ptr [[PSRCB_ADDR_1_LCSSA]], i32 [[TMP10]] ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, ptr [[PSRCA_ADDR_1_LCSSA]], i32 [[TMP10]] ; CHECK-NEXT: [[SCEVGEP94:%.*]] = getelementptr i32, ptr [[PDEST_ADDR_1_LCSSA]], i32 [[TMP10]] ; CHECK-NEXT: br label [[FOR_END40]] diff --git a/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll b/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll index 0fa6e34..0eb9deb 100644 --- a/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll +++ b/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll @@ -14,6 +14,7 @@ define void @test(i64 %a) { ; CHECK: outer_header: ; 
CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ], [ 21, [[ENTRY:%.*]] ] ; CHECK-NEXT: [[I:%.*]] = phi i64 [ 20, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[OUTER_LATCH]] ] +; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; CHECK-NEXT: br label [[INNER_HEADER:%.*]] ; CHECK: inner_header: ; CHECK-NEXT: [[J:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[J_NEXT:%.*]], [[INNER_HEADER]] ] @@ -22,7 +23,6 @@ define void @test(i64 %a) { ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[J_NEXT]], [[INDVARS_IV]] ; CHECK-NEXT: br i1 [[EXITCOND]], label [[INNER_HEADER]], label [[OUTER_LATCH]] ; CHECK: outer_latch: -; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; CHECK-NEXT: [[COND2:%.*]] = icmp ne i64 [[I_NEXT]], 40 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: br i1 [[COND2]], label [[OUTER_HEADER]], label [[RETURN:%.*]] diff --git a/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll b/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll index 1592b84..829092f 100644 --- a/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll +++ b/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -passes=indvars -S | FileCheck %s +; RUN: opt < %s -passes='require<scalar-evolution>,indvars,loop-mssa(licm)' -S | FileCheck %s define i32 @logical_and_2ops(i32 %n, i32 %m) { ; CHECK-LABEL: @logical_and_2ops( @@ -56,10 +56,10 @@ define i32 @logical_and_3ops(i32 %n, i32 %m, i32 %k) { ; CHECK: loop: ; CHECK-NEXT: br i1 false, label [[LOOP]], label [[EXIT:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[TMP0:%.*]] = freeze i32 [[K:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[M:%.*]] -; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP1]]) -; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N:%.*]]) +; CHECK-NEXT: [[N:%.*]] = freeze i32 [[K:%.*]] +; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 [[N]]) +; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N1:%.*]]) ; CHECK-NEXT: ret i32 [[UMIN1]] ; entry: @@ -84,10 +84,10 @@ define i32 @logical_or_3ops(i32 %n, i32 %m, i32 %k) { ; CHECK: loop: ; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[TMP0:%.*]] = freeze i32 [[K:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[M:%.*]] -; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP1]]) -; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N:%.*]]) +; CHECK-NEXT: [[N:%.*]] = freeze i32 [[K:%.*]] +; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 [[N]]) +; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N1:%.*]]) ; CHECK-NEXT: ret i32 [[UMIN1]] ; entry: diff --git a/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll b/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll index e006d9f..f798eb28 100644 --- a/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll +++ b/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll @@ -932,6 +932,9 @@ for.end: ; preds = %for.body, %entry define i16 @ult_multiuse_profit(i16 %n.raw, i8 %start) mustprogress { ; CHECK-LABEL: @ult_multiuse_profit( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[START:%.*]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP2]] to i16 +; CHECK-NEXT: [[UMAX:%.*]] = call i16 @llvm.umax.i16(i16 [[TMP1]], i16 254) ; 
CHECK-NEXT: [[TMP0:%.*]] = trunc i16 254 to i8 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: @@ -940,9 +943,6 @@ define i16 @ult_multiuse_profit(i16 %n.raw, i8 %start) mustprogress { ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[TMP0]] ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] ; CHECK: for.end: -; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[START:%.*]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i16 -; CHECK-NEXT: [[UMAX:%.*]] = call i16 @llvm.umax.i16(i16 [[TMP2]], i16 254) ; CHECK-NEXT: ret i16 [[UMAX]] ; entry: diff --git a/llvm/test/Transforms/IndVarSimplify/pr116483.ll b/llvm/test/Transforms/IndVarSimplify/pr116483.ll index 093e25a..e9e0d22 100644 --- a/llvm/test/Transforms/IndVarSimplify/pr116483.ll +++ b/llvm/test/Transforms/IndVarSimplify/pr116483.ll @@ -4,16 +4,16 @@ define i32 @test() { ; CHECK-LABEL: define i32 @test() { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: br label %[[LOOP_BODY:.*]] -; CHECK: [[LOOP_BODY]]: -; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[LOOP_BODY]] -; CHECK: [[EXIT]]: ; CHECK-NEXT: [[XOR:%.*]] = xor i32 0, 3 ; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[XOR]], 329 ; CHECK-NEXT: [[CONV:%.*]] = trunc i32 [[MUL]] to i16 ; CHECK-NEXT: [[SEXT:%.*]] = shl i16 [[CONV]], 8 ; CHECK-NEXT: [[CONV1:%.*]] = ashr i16 [[SEXT]], 8 ; CHECK-NEXT: [[CONV3:%.*]] = zext i16 [[CONV1]] to i32 +; CHECK-NEXT: br label %[[LOOP_BODY:.*]] +; CHECK: [[LOOP_BODY]]: +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[LOOP_BODY]] +; CHECK: [[EXIT]]: ; CHECK-NEXT: ret i32 [[CONV3]] ; entry: diff --git a/llvm/test/Transforms/IndVarSimplify/pr24783.ll b/llvm/test/Transforms/IndVarSimplify/pr24783.ll index c521bca..37ecf42 100644 --- a/llvm/test/Transforms/IndVarSimplify/pr24783.ll +++ b/llvm/test/Transforms/IndVarSimplify/pr24783.ll @@ -7,11 +7,11 @@ target triple = "powerpc64-unknown-linux-gnu" define void @f(ptr %end.s, ptr %loc, i32 %p) { ; CHECK-LABEL: @f( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i32, ptr [[END_S:%.*]], i32 [[P:%.*]] ; CHECK-NEXT: br label [[WHILE_BODY_I:%.*]] ; CHECK: while.body.i: ; CHECK-NEXT: br i1 true, label [[LOOP_EXIT:%.*]], label [[WHILE_BODY_I]] ; CHECK: loop.exit: -; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i32, ptr [[END_S:%.*]], i32 [[P:%.*]] ; CHECK-NEXT: store ptr [[END]], ptr [[LOC:%.*]], align 8 ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/IndVarSimplify/pr39673.ll b/llvm/test/Transforms/IndVarSimplify/pr39673.ll index 7b093b3..3cee1ab 100644 --- a/llvm/test/Transforms/IndVarSimplify/pr39673.ll +++ b/llvm/test/Transforms/IndVarSimplify/pr39673.ll @@ -148,6 +148,7 @@ loop2.end: ; preds = %loop2 define i16 @neg_loop_carried(i16 %arg) { ; CHECK-LABEL: @neg_loop_carried( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[ARG:%.*]], 2 ; CHECK-NEXT: br label [[LOOP1:%.*]] ; CHECK: loop1: ; CHECK-NEXT: [[L1:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[L1_ADD:%.*]], [[LOOP1]] ] @@ -155,7 +156,6 @@ define i16 @neg_loop_carried(i16 %arg) { ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i16 [[L1_ADD]], 2 ; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP1]], label [[LOOP2_PREHEADER:%.*]] ; CHECK: loop2.preheader: -; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[ARG:%.*]], 2 ; CHECK-NEXT: br label [[LOOP2:%.*]] ; CHECK: loop2: ; CHECK-NEXT: [[K2:%.*]] = phi i16 [ [[K2_ADD:%.*]], [[LOOP2]] ], [ [[TMP0]], [[LOOP2_PREHEADER]] ] diff --git a/llvm/test/Transforms/IndVarSimplify/pr63763.ll b/llvm/test/Transforms/IndVarSimplify/pr63763.ll index 427db1e..a5fde67 100644 --- 
a/llvm/test/Transforms/IndVarSimplify/pr63763.ll +++ b/llvm/test/Transforms/IndVarSimplify/pr63763.ll @@ -16,13 +16,13 @@ define i32 @test(i1 %c) { ; CHECK-NEXT: [[CONV2:%.*]] = ashr exact i32 [[SEXT]], 24 ; CHECK-NEXT: [[INVARIANT_OP:%.*]] = sub nsw i32 7, [[CONV2]] ; CHECK-NEXT: call void @use(i32 [[INVARIANT_OP]]) +; CHECK-NEXT: [[SEXT_US:%.*]] = shl i32 [[SEL]], 24 +; CHECK-NEXT: [[CONV2_US:%.*]] = ashr exact i32 [[SEXT_US]], 24 +; CHECK-NEXT: [[INVARIANT_OP_US:%.*]] = sub nsw i32 7, [[CONV2_US]] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[SEXT_US:%.*]] = shl i32 [[SEL]], 24 -; CHECK-NEXT: [[CONV2_US:%.*]] = ashr exact i32 [[SEXT_US]], 24 -; CHECK-NEXT: [[INVARIANT_OP_US:%.*]] = sub nsw i32 7, [[CONV2_US]] ; CHECK-NEXT: ret i32 [[INVARIANT_OP_US]] ; entry: diff --git a/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll b/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll index b3162de..7cdc98a 100644 --- a/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll +++ b/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll @@ -4,22 +4,21 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" define i32 @remove_loop(i32 %size) { -; CHECK-LABEL: define i32 @remove_loop( -; CHECK-SAME: i32 [[SIZE:%.*]]) { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[WHILE_COND:.*]] -; CHECK: [[WHILE_COND]]: -; CHECK-NEXT: [[SIZE_ADDR_0:%.*]] = phi i32 [ [[SIZE]], %[[ENTRY]] ], [ [[SUB:%.*]], %[[WHILE_COND]] ] -; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[SIZE_ADDR_0]], 31 -; CHECK-NEXT: [[SUB]] = add i32 [[SIZE_ADDR_0]], -32 -; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_COND]], label %[[WHILE_END:.*]] -; CHECK: [[WHILE_END]]: -; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[SIZE]], 31 +; CHECK-LABEL: @remove_loop( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[SIZE:%.*]], 31 ; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SIZE]], i32 31) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[UMIN]] ; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 5 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 5 ; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[SIZE]], [[TMP3]] +; CHECK-NEXT: br label [[WHILE_COND:%.*]] +; CHECK: while.cond: +; CHECK-NEXT: [[SIZE_ADDR_0:%.*]] = phi i32 [ [[SIZE]], [[ENTRY:%.*]] ], [ [[SUB:%.*]], [[WHILE_COND]] ] +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[SIZE_ADDR_0]], 31 +; CHECK-NEXT: [[SUB]] = add i32 [[SIZE_ADDR_0]], -32 +; CHECK-NEXT: br i1 [[CMP]], label [[WHILE_COND]], label [[WHILE_END:%.*]] +; CHECK: while.end: ; CHECK-NEXT: ret i32 [[TMP4]] ; entry: diff --git a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll index 84ae79d..41fce36 100644 --- a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll +++ b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll @@ -76,6 +76,10 @@ define i64 @narow_canonical_iv_wide_multiplied_iv(i32 %x, i64 %y, ptr %0) { ; CHECK-LABEL: @narow_canonical_iv_wide_multiplied_iv( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SMAX:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X:%.*]], i32 1) +; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[SMAX]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[Y:%.*]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP3]], 1 ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] 
= phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] @@ -84,10 +88,6 @@ define i64 @narow_canonical_iv_wide_multiplied_iv(i32 %x, i64 %y, ptr %0) { ; CHECK-NEXT: [[EC:%.*]] = icmp ne i32 [[IV_NEXT]], [[SMAX]] ; CHECK-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[SMAX]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[Y:%.*]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1 -; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP3]], 1 ; CHECK-NEXT: ret i64 [[TMP6]] ; entry: diff --git a/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll b/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll index 14e06fe..aca553e 100644 --- a/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll +++ b/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll @@ -23,8 +23,8 @@ define void @test1(i8 %x, ptr %ptr) { ; CHECK-NEXT: br label [[WHILE_COND192:%.*]] ; CHECK: while.cond192: ; CHECK-NEXT: switch i8 [[X:%.*]], label [[WHILE_BODY205:%.*]] [ -; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]] -; CHECK-NEXT: i8 10, label [[IF_END224_LOOPEXIT1:%.*]] +; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]] +; CHECK-NEXT: i8 10, label [[IF_END224_LOOPEXIT1:%.*]] ; CHECK-NEXT: ] ; CHECK: while.cond215.preheader: ; CHECK-NEXT: br label [[WHILE_COND215:%.*]] @@ -103,8 +103,8 @@ define void @test2(i16 %x) { ; CHECK-NEXT: br label [[FOR_COND:%.*]] ; CHECK: for.cond: ; CHECK-NEXT: switch i16 [[X:%.*]], label [[RETURN_LOOPEXIT1:%.*]] [ -; CHECK-NEXT: i16 41, label [[FOR_END:%.*]] -; CHECK-NEXT: i16 43, label [[FOR_COND]] +; CHECK-NEXT: i16 41, label [[FOR_END:%.*]] +; CHECK-NEXT: i16 43, label [[FOR_COND]] ; CHECK-NEXT: ] ; CHECK: for.end: ; CHECK-NEXT: [[I_0_LCSSA2:%.*]] = phi i32 [ 0, [[FOR_COND]] ] @@ -336,6 +336,7 @@ if.end1824: ; preds = %for.end1326 define void @test5(ptr %header, i32 %conv, i8 %n) { ; CHECK-LABEL: @test5( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i32 [[CONV:%.*]], 2 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: ; CHECK-NEXT: br label [[FOR_INNER:%.*]] @@ -358,7 +359,6 @@ define void @test5(ptr %header, i32 %conv, i8 %n) { ; CHECK-NEXT: br i1 false, label [[FOR_BODY]], label [[WHILE_COND_PREHEADER:%.*]] ; CHECK: while.cond.preheader: ; CHECK-NEXT: [[ADD85_LCSSA:%.*]] = phi i32 [ [[ADD85]], [[FOR_INC]] ] -; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i32 [[CONV:%.*]], 2 ; CHECK-NEXT: br label [[WHILE_COND:%.*]] ; CHECK: while.cond: ; CHECK-NEXT: [[POS_8:%.*]] = phi i32 [ [[INC114:%.*]], [[WHILE_BODY:%.*]] ], [ [[ADD85_LCSSA]], [[WHILE_COND_PREHEADER]] ] @@ -427,8 +427,8 @@ define void @test6(i8 %x) { ; CHECK-NEXT: br label [[WHILE_COND192:%.*]] ; CHECK: while.cond192: ; CHECK-NEXT: switch i8 [[X:%.*]], label [[WHILE_BODY205:%.*]] [ -; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]] -; CHECK-NEXT: i8 10, label [[IF_END224:%.*]] +; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]] +; CHECK-NEXT: i8 10, label [[IF_END224:%.*]] ; CHECK-NEXT: ] ; CHECK: while.cond215.preheader: ; CHECK-NEXT: [[I_7_LCSSA:%.*]] = phi i32 [ 0, [[WHILE_COND192]] ] diff --git a/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll b/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll index a92d328..ad69812 100644 --- a/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll +++ b/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll @@ -46,12 +46,12 @@ for.end106: ; preds = %for.cond define i32 @test_pr58439(i32 %a) { 
; CHECK-LABEL: @test_pr58439( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[OR:%.*]] = or i32 [[A:%.*]], 1 ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: br i1 false, label [[LOOP]], label [[EXIT:%.*]] ; CHECK: exit: ; CHECK-NEXT: [[C_EXT_LCSSA:%.*]] = phi i32 [ 0, [[LOOP]] ] -; CHECK-NEXT: [[OR:%.*]] = or i32 [[A:%.*]], 1 ; CHECK-NEXT: [[RES:%.*]] = add i32 [[C_EXT_LCSSA]], [[OR]] ; CHECK-NEXT: ret i32 [[RES]] ; @@ -76,6 +76,7 @@ define i8 @l(i32 %inc, i1 %tobool.not.i) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[OUTER_HEADER:%.*]] ; CHECK: outer.header: +; CHECK-NEXT: [[AND:%.*]] = and i32 1, [[INC:%.*]] ; CHECK-NEXT: br label [[INNER:%.*]] ; CHECK: inner: ; CHECK-NEXT: [[C_05_I:%.*]] = phi i32 [ [[INC_I:%.*]], [[INNER]] ], [ 0, [[OUTER_HEADER]] ] @@ -86,7 +87,6 @@ define i8 @l(i32 %inc, i1 %tobool.not.i) { ; CHECK: outer.latch: ; CHECK-NEXT: [[C_05_I_LCSSA:%.*]] = phi i32 [ [[C_05_I]], [[INNER]] ] ; CHECK-NEXT: [[LCSSA:%.*]] = phi i32 [ 0, [[INNER]] ] -; CHECK-NEXT: [[AND:%.*]] = and i32 1, [[INC:%.*]] ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[AND]] to i8 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C_05_I_LCSSA]] to i8 ; CHECK-NEXT: [[TMP2:%.*]] = sub i8 [[TMP0]], [[TMP1]] diff --git a/llvm/test/Transforms/IndVarSimplify/sentinel.ll b/llvm/test/Transforms/IndVarSimplify/sentinel.ll index 5234141..4f12308 100644 --- a/llvm/test/Transforms/IndVarSimplify/sentinel.ll +++ b/llvm/test/Transforms/IndVarSimplify/sentinel.ll @@ -9,19 +9,19 @@ define void @test(i1 %arg) personality ptr @snork { ; CHECK-NEXT: bb: ; CHECK-NEXT: br label [[BB4:%.*]] ; CHECK: bb1: -; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add i32 [[INDVARS_IV:%.*]], 1 -; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP6:%.*]], [[INDVARS_IV]] -; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[SMAX:%.*]] ; CHECK-NEXT: br i1 [[ARG:%.*]], label [[BB2:%.*]], label [[BB4]] ; CHECK: bb2: -; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[TMP1]], [[BB1:%.*]] ] +; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[TMP1:%.*]], [[BB1:%.*]] ] ; CHECK-NEXT: ret void ; CHECK: bb4: -; CHECK-NEXT: [[INDVARS_IV]] = phi i32 [ [[INDVARS_IV_NEXT]], [[BB1]] ], [ undef, [[BB:%.*]] ] -; CHECK-NEXT: [[SMAX]] = call i32 @llvm.smax.i32(i32 [[INDVARS_IV]], i32 36) -; CHECK-NEXT: [[TMP6]] = invoke i32 @quux() [ "deopt"(i32 0, i32 0, i32 0, i32 180, i32 0, i32 25, i32 0, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 3, i32 [[INDVARS_IV]], i32 3, i32 undef, i32 7, ptr null, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 4, double undef, i32 7, ptr null, i32 4, i64 undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 3, i32 undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null) ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[BB1]] ], [ undef, [[BB:%.*]] ] +; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[INDVARS_IV]], i32 36) +; CHECK-NEXT: [[TMP6:%.*]] = invoke i32 @quux() [ "deopt"(i32 0, i32 0, i32 0, i32 180, i32 0, i32 25, i32 0, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 3, i32 [[INDVARS_IV]], i32 3, i32 undef, i32 7, ptr null, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 4, double undef, i32 7, ptr null, i32 4, i64 undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 3, i32 undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) 
undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null) ] ; CHECK-NEXT: to label [[BB7:%.*]] unwind label [[BB15:%.*]] ; CHECK: bb7: +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP6]], [[INDVARS_IV]] +; CHECK-NEXT: [[TMP1]] = sub i32 [[TMP0]], [[SMAX]] ; CHECK-NEXT: br label [[BB9:%.*]] ; CHECK: bb9: ; CHECK-NEXT: br i1 true, label [[BB1]], label [[BB9]] diff --git a/llvm/test/Transforms/IndVarSimplify/sink-from-preheader.ll b/llvm/test/Transforms/IndVarSimplify/sink-from-preheader.ll deleted file mode 100644 index 89583f9..0000000 --- a/llvm/test/Transforms/IndVarSimplify/sink-from-preheader.ll +++ /dev/null @@ -1,32 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -passes=indvars -indvars-predicate-loops=0 -S | FileCheck %s -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" -target triple = "i386-apple-darwin10.0" - -; We make sinking here, Changed flag should be set properly. -define i32 @test(i32 %a, i32 %b, i32 %N) { -; CHECK-LABEL: @test( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]] -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] -; CHECK: exit: -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: ret i32 [[ADD]] -; -entry: - %add = add i32 %a, %b - br label %loop - -loop: - %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] - %iv.next = add i32 %iv, 1 - %cmp = icmp slt i32 %iv.next, %N - br i1 %cmp, label %loop, label %exit - -exit: - ret i32 %add -} diff --git a/llvm/test/Transforms/IndVarSimplify/sink-trapping.ll b/llvm/test/Transforms/IndVarSimplify/sink-trapping.ll deleted file mode 100644 index d2478be..0000000 --- a/llvm/test/Transforms/IndVarSimplify/sink-trapping.ll +++ /dev/null @@ -1,19 +0,0 @@ -; RUN: opt < %s -passes=indvars -S | FileCheck %s - -declare i1 @b() - -define i32 @a(i32 %x) nounwind { -for.body.preheader: - %y = sdiv i32 10, %x - br label %for.body - -for.body: - %cmp = call i1 @b() - br i1 %cmp, label %for.body, label %for.end.loopexit - -for.end.loopexit: - ret i32 %y -} -; CHECK: for.end.loopexit: -; CHECK: sdiv -; CHECK: ret diff --git a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll index 17921af..abe7a3e 100644 --- a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll +++ b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll @@ -24,13 +24,13 @@ define void @_Z3fn1v() { ; CHECK-NEXT: [[X8:%.*]] = icmp ult i32 0, 4 ; CHECK-NEXT: br i1 [[X8]], label [[DOTPREHEADER_LR_PH:%.*]], label [[X22]] ; CHECK: .preheader.lr.ph: +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[K_09]], i64 [[TMP5]] ; CHECK-NEXT: br label [[DOTPREHEADER:%.*]] ; CHECK: .preheader: ; CHECK-NEXT: br label [[X17:%.*]] ; CHECK: x17: ; CHECK-NEXT: br i1 false, label [[DOTPREHEADER]], label [[DOT_CRIT_EDGE_8:%.*]] ; CHECK: ._crit_edge.8: -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[K_09]], i64 [[TMP5]] ; CHECK-NEXT: br label [[X22]] ; CHECK: x22: ; CHECK-NEXT: [[K_1_LCSSA:%.*]] = phi ptr [ [[SCEVGEP]], 
[[DOT_CRIT_EDGE_8]] ], [ [[K_09]], [[DOTPREHEADER4]] ] diff --git a/llvm/test/Transforms/LICM/scalar-promote.ll b/llvm/test/Transforms/LICM/scalar-promote.ll index 3af65df..e6cc457 100644 --- a/llvm/test/Transforms/LICM/scalar-promote.ll +++ b/llvm/test/Transforms/LICM/scalar-promote.ll @@ -43,9 +43,9 @@ define void @test2(i32 %i) { ; CHECK-LABEL: define void @test2( ; CHECK-SAME: i32 [[I:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[X1:%.*]] = getelementptr i32, ptr @X, i64 1 ; CHECK-NEXT: [[X2:%.*]] = getelementptr i32, ptr @X, i64 1 -; CHECK-NEXT: [[X1_PROMOTED:%.*]] = load i32, ptr [[X1]], align 4 +; CHECK-NEXT: [[X3:%.*]] = getelementptr i32, ptr @X, i64 1 +; CHECK-NEXT: [[X1_PROMOTED:%.*]] = load i32, ptr [[X2]], align 4 ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: ; CHECK-NEXT: [[A1:%.*]] = phi i32 [ [[V:%.*]], %[[LOOP]] ], [ [[X1_PROMOTED]], %[[ENTRY]] ] @@ -53,7 +53,7 @@ define void @test2(i32 %i) { ; CHECK-NEXT: br i1 false, label %[[LOOP]], label %[[EXIT:.*]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i32 [ [[V]], %[[LOOP]] ] -; CHECK-NEXT: store i32 [[V_LCSSA]], ptr [[X1]], align 4 +; CHECK-NEXT: store i32 [[V_LCSSA]], ptr [[X2]], align 4 ; CHECK-NEXT: ret void ; Entry: diff --git a/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll b/llvm/test/Transforms/LICM/sink-alloca.ll index 0997bf6..2bf9350 100644 --- a/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll +++ b/llvm/test/Transforms/LICM/sink-alloca.ll @@ -1,9 +1,9 @@ -; RUN: opt < %s -passes=indvars -S | FileCheck %s +; RUN: opt < %s -passes=licm -verify-memoryssa -S | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" target triple = "i386-apple-darwin10.0" ; PR4775 -; Indvars shouldn't sink the alloca out of the entry block, even though +; LICM shouldn't sink the alloca out of the entry block, even though ; it's not used until after the loop. define i32 @main() nounwind { ; CHECK: entry: @@ -25,7 +25,7 @@ while.end: ; preds = %while.cond declare i32 @bar() ; <rdar://problem/10352360> -; Indvars shouldn't sink the first alloca between the stacksave and stackrestore +; LICM shouldn't sink the first alloca between the stacksave and stackrestore ; intrinsics. declare ptr @a(...) declare ptr @llvm.stacksave() nounwind diff --git a/llvm/test/Transforms/LICM/sink-from-preheader.ll b/llvm/test/Transforms/LICM/sink-from-preheader.ll new file mode 100644 index 0000000..bbe3d3b --- /dev/null +++ b/llvm/test/Transforms/LICM/sink-from-preheader.ll @@ -0,0 +1,185 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -passes=licm -verify-memoryssa -S | FileCheck %s + +; We perform sinking here, so the Changed flag should be set properly.
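(Editorial aside, not part of the patch: sinking a computation that is only used after the loop moves it out of the region where it would otherwise be live across every iteration, and a pass that moves it must report Changed so cached analyses get invalidated; the -verify-memoryssa RUN line would catch a stale MemorySSA here. Roughly, as IR comments using the names from the test below:)

; entry:                        ; before LICM: %add is live across the loop
;   %add = add i32 %a, %b
;   br label %loop
; exit:                         ; after LICM: computed right at its only use
;   %add = add i32 %a, %b
;   ret i32 %add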
+define i32 @test(i32 %a, i32 %b, i32 %N) { +; CHECK-LABEL: @test( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + %add = add i32 %a, %b + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %iv.next = add i32 %iv, 1 + %cmp = icmp slt i32 %iv.next, %N + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %add +} + +define i32 @test_with_unused_load(i32 %a, ptr %b, i32 %N) { +; CHECK-LABEL: @test_with_unused_load( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[B:%.*]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LOAD]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + %load = load i32, ptr %b + %add = add i32 %a, %load + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %iv.next = add i32 %iv, 1 + %cmp = icmp slt i32 %iv.next, %N + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %add +} + +define i32 @test_with_unused_load_modified_store(i32 %a, ptr %b, i32 %N) { +; CHECK-LABEL: @test_with_unused_load_modified_store( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[B:%.*]], align 4 +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[A:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: [[SMAX:%.*]] = phi i32 [ [[IV_NEXT]], [[LOOP]] ] +; CHECK-NEXT: store i32 [[SMAX]], ptr [[B]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A]], [[LOAD]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + %load = load i32, ptr %b + %add = add i32 %a, %load + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %iv.next = add i32 %iv, %a + store i32 %iv.next, ptr %b + %cmp = icmp slt i32 %iv.next, %N + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %add +} + +; Volatile loads must not be sunk. 
+define i32 @test_with_volatile_load_no_sink(i32 %a, ptr %b, i32 %N) { +; CHECK-LABEL: @test_with_volatile_load_no_sink( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LD:%.*]] = load volatile i32, ptr [[B:%.*]], align 4 +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LD]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + %ld = load volatile i32, ptr %b, align 4 + %add = add i32 %a, %ld + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %iv.next = add i32 %iv, 1 + %cmp = icmp slt i32 %iv.next, %N + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %add +} + +; Ordered/atomic loads must not be sunk. +define i32 @test_with_atomic_load_no_sink(i32 %a, ptr %b, i32 %N) { +; CHECK-LABEL: @test_with_atomic_load_no_sink( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LD:%.*]] = load atomic i32, ptr [[B:%.*]] acquire, align 4 +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LD]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + %ld = load atomic i32, ptr %b acquire, align 4 + %add = add i32 %a, %ld + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %iv.next = add i32 %iv, 1 + %cmp = icmp slt i32 %iv.next, %N + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %add +} + +declare void @clobber(ptr) + +; Calls that may write memory in the loop should prevent sinking the load. 
+define i32 @test_with_unused_load_clobbered_by_call(i32 %a, ptr %b, i32 %N) { +; CHECK-LABEL: @test_with_unused_load_clobbered_by_call( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[B:%.*]], align 4 +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: call void @clobber(ptr [[B]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LD]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + %ld = load i32, ptr %b, align 4 + %add = add i32 %a, %ld + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %iv.next = add i32 %iv, 1 + call void @clobber(ptr %b) + %cmp = icmp slt i32 %iv.next, %N + br i1 %cmp, label %loop, label %exit + +exit: + ret i32 %add +} diff --git a/llvm/test/Transforms/LICM/sink-trapping.ll b/llvm/test/Transforms/LICM/sink-trapping.ll new file mode 100644 index 0000000..f4d260d --- /dev/null +++ b/llvm/test/Transforms/LICM/sink-trapping.ll @@ -0,0 +1,28 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -passes=licm -verify-memoryssa -S | FileCheck %s + +declare i1 @b() + +define i32 @a(i32 %x) nounwind { +; CHECK-LABEL: define i32 @a( +; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[FOR_BODY_PREHEADER:.*:]] +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[CMP:%.*]] = call i1 @b() +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_END_LOOPEXIT:.*]] +; CHECK: [[FOR_END_LOOPEXIT]]: +; CHECK-NEXT: [[Y:%.*]] = sdiv i32 10, [[X]] +; CHECK-NEXT: ret i32 [[Y]] +; +for.body.preheader: + %y = sdiv i32 10, %x + br label %for.body + +for.body: + %cmp = call i1 @b() + br i1 %cmp, label %for.body, label %for.end.loopexit + +for.end.loopexit: + ret i32 %y +} diff --git a/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll b/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll index bdd51c2..6c19aaa 100644 --- a/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll +++ b/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll @@ -84,13 +84,13 @@ define i32 @scev_invalidation_after_deleting(ptr %src) { ; CHECK: inner.2.preheader: ; CHECK-NEXT: br label [[INNER_3_PH:%.*]] ; CHECK: inner.3.ph: +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 0 to i32 ; CHECK-NEXT: br label [[INNER_3:%.*]] ; CHECK: inner.3: ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[SRC:%.*]], align 4 ; CHECK-NEXT: br i1 false, label [[OUTER_LATCH]], label [[INNER_3]] ; CHECK: outer.latch: ; CHECK-NEXT: [[L_LCSSA:%.*]] = phi i32 [ [[L]], [[INNER_3]] ] -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 0 to i32 ; CHECK-NEXT: [[OUTER_IV_NEXT]] = add nsw i32 [[L_LCSSA]], [[TRUNC]] ; CHECK-NEXT: br label [[OUTER_HEADER]] ; diff --git a/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll b/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll index 62c5627..4a55c0e 100644 --- a/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll +++ b/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll @@ -4,11 +4,11 @@ define void @test_pr50940(ptr %A, ptr %B) { ; CHECK-LABEL: @test_pr50940( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4 ; CHECK-NEXT: br label [[OUTER_HEADER:%.*]] ; CHECK: 
outer.header: ; CHECK-NEXT: br i1 false, label [[OUTER_LATCH:%.*]], label [[INNER_PH:%.*]] ; CHECK: inner.ph: -; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4 ; CHECK-NEXT: [[GEP_A_3:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 3 ; CHECK-NEXT: br label [[INNER_LVER_CHECK:%.*]] ; CHECK: inner.lver.check: diff --git a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll index b2ec53c..90995a0 100644 --- a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll +++ b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll @@ -537,6 +537,52 @@ exit: ; preds = %loop %ret = and i32 %unrelated.next, %crc.next ret i32 %ret } + +define i16 @not.crc.data.next.outside.user(i16 %crc.init, i16 %data.init) { +; CHECK-LABEL: define i16 @not.crc.data.next.outside.user( +; CHECK-SAME: i16 [[CRC_INIT:%.*]], i16 [[DATA_INIT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[TBL_LD:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = phi i16 [ [[DATA_INIT]], %[[ENTRY]] ], [ [[DATA_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[CRC_NEXT3:%.*]] = xor i16 [[CRC_BE_SHIFT]], [[TBL_LD]] +; CHECK-NEXT: [[CRC_SHL:%.*]] = shl i16 [[TBL_LD]], 1 +; CHECK-NEXT: [[CRC_XOR:%.*]] = xor i16 [[CRC_SHL]], 3 +; CHECK-NEXT: [[CHECK_SB:%.*]] = icmp slt i16 [[CRC_NEXT3]], 0 +; CHECK-NEXT: [[CRC_NEXT]] = select i1 [[CHECK_SB]], i16 [[CRC_XOR]], i16 [[CRC_SHL]] +; CHECK-NEXT: [[DATA_NEXT]] = shl i16 [[CRC_BE_SHIFT]], 1 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp samesign ult i32 [[IV]], 7 +; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[CRC_NEXT_LCSSA:%.*]] = phi i16 [ [[CRC_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[DATA_NEXT_LCSSA:%.*]] = phi i16 [ [[DATA_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[RET:%.*]] = xor i16 [[DATA_NEXT_LCSSA]], [[CRC_NEXT_LCSSA]] +; CHECK-NEXT: ret i16 [[RET]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %crc = phi i16 [ %crc.init, %entry ], [ %crc.next, %loop ] + %data = phi i16 [ %data.init, %entry ], [ %data.next, %loop ] + %xor.crc.data = xor i16 %data, %crc + %crc.shl = shl i16 %crc, 1 + %crc.xor = xor i16 %crc.shl, 3 + %check.sb = icmp slt i16 %xor.crc.data, 0 + %crc.next = select i1 %check.sb, i16 %crc.xor, i16 %crc.shl + %data.next = shl i16 %data, 1 + %iv.next = add nuw nsw i32 %iv, 1 + %exit.cond = icmp samesign ult i32 %iv, 7 + br i1 %exit.cond, label %loop, label %exit + +exit: + %ret = xor i16 %data.next, %crc.next + ret i16 %ret +} ;. ; CHECK: attributes #[[ATTR0]] = { optsize } ;. diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll index db30fd2..1944a9c 100644 --- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll @@ -119,8 +119,6 @@ for.end: ; We can't use postindex addressing on the conditional load of qval and can't ; convert the loop condition to a compare with zero, so we should instead use ; offset addressing. -; FIXME: Currently we don't notice the load of qval is conditional, and attempt -; postindex addressing anyway. 
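(Aside for context, not from the patch: "postindex addressing" folds the pointer bump into the memory access itself, e.g. AArch64 `ldr w8, [x9], #4`, so it is only correct when the bump should happen exactly as often as the access. Because the qval load below is conditional while the induction variable advances on every iteration, the profitable form is offset addressing, where the address is derived from the index IV at the use site. A minimal, self-contained sketch of that form, with hypothetical names:)

define i32 @offset_mode_sketch(ptr %q, i64 %idx) {
entry:
  ; address = %q + (%idx << 2); on AArch64 this folds into
  ; ldr w0, [x0, x1, lsl #2], and nothing has to advance inside a
  ; conditionally executed block.
  %off = shl i64 %idx, 2
  %addr = getelementptr i8, ptr %q, i64 %off
  %val = load i32, ptr %addr, align 4
  ret i32 %val
}

This mirrors the shl/getelementptr pair the updated CHECK lines for @conditional_load now expect inside if.then.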
define i32 @conditional_load(ptr %p, ptr %q, ptr %n) { ; CHECK-LABEL: define i32 @conditional_load( ; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[N:%.*]]) { @@ -128,7 +126,6 @@ define i32 @conditional_load(ptr %p, ptr %q, ptr %n) { ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP2:%.*]], %[[FOR_INC:.*]] ], [ [[P]], %[[ENTRY]] ] -; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_INC]] ], [ [[Q]], %[[ENTRY]] ] ; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ [[IDX_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[RET_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: [[PVAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4 @@ -136,6 +133,8 @@ define i32 @conditional_load(ptr %p, ptr %q, ptr %n) { ; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4 ; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[FOR_INC]], label %[[IF_THEN:.*]] ; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[IDX]], 2 +; CHECK-NEXT: [[LSR_IV:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP0]] ; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[LSR_IV]], align 4 ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RET]], [[QVAL]] ; CHECK-NEXT: br label %[[FOR_INC]] @@ -143,7 +142,6 @@ define i32 @conditional_load(ptr %p, ptr %q, ptr %n) { ; CHECK-NEXT: [[RET_NEXT]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[RET]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[IDX_NEXT]] = add nuw nsw i64 [[IDX]], 1 ; CHECK-NEXT: [[NVAL:%.*]] = load volatile i64, ptr [[N]], align 8 -; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 4 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IDX_NEXT]], [[NVAL]] ; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT:.*]] ; CHECK: [[EXIT]]: @@ -176,3 +174,141 @@ for.inc: exit: ret i32 %ret.next } + +; We can use postindex addressing for both loads here, even though the second +; may not be executed on every loop iteration. 
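(Aside, not from the patch: postindex works for both loads in the next test because each load is adjacent to its pointer bump, and the bump's block lies on every path that continues the loop. Roughly, as IR comments:)

; for.body:                           ; runs on every iteration
;   %pval = load i32, ptr %lsr.iv1
;   %scevgep2 = getelementptr i8, ptr %lsr.iv1, i64 4   ; foldable bump
; for.inc:                            ; the latch; skipped only when exiting
;   %qval = load i32, ptr %lsr.iv
;   %scevgep = getelementptr i8, ptr %lsr.iv, i64 4     ; foldable bump

Once an iteration reaches for.inc, the load and its bump execute together, so a post-indexed load is safe even though for.inc is not executed on the early-exit path.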
+define i32 @early_exit_load(ptr %p, ptr %q, ptr %n) { +; CHECK-LABEL: define i32 @early_exit_load( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP2:%.*]], %[[FOR_INC:.*]] ], [ [[P]], %[[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_INC]] ], [ [[Q]], %[[ENTRY]] ] +; CHECK-NEXT: [[RET_PHI:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ [[IDX_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[PVAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4 +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[PVAL]], 0 +; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4 +; CHECK-NEXT: br i1 [[CMP1]], label %[[FOR_INC]], label %[[EXIT:.*]] +; CHECK: [[FOR_INC]]: +; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[LSR_IV]], align 4 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[QVAL]], [[RET_PHI]] +; CHECK-NEXT: [[IDX_NEXT]] = add nuw nsw i64 [[IDX]], 1 +; CHECK-NEXT: [[NVAL:%.*]] = load volatile i64, ptr [[N]], align 8 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 4 +; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i64 [[IDX_NEXT]], [[NVAL]] +; CHECK-NEXT: br i1 [[CMP2]], label %[[FOR_BODY]], label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[RET_PHI]], %[[FOR_BODY]] ], [ [[ADD]], %[[FOR_INC]] ] +; CHECK-NEXT: ret i32 [[RET]] +; +entry: + br label %for.body + +for.body: + %ret.phi = phi i32 [ %add, %for.inc ], [ 0, %entry ] + %idx = phi i64 [ %idx.next, %for.inc ], [ 0, %entry ] + %paddr = getelementptr inbounds nuw i32, ptr %p, i64 %idx + %pval = load i32, ptr %paddr, align 4 + %cmp1 = icmp eq i32 %pval, 0 + br i1 %cmp1, label %for.inc, label %exit + +for.inc: + %qaddr = getelementptr inbounds nuw i32, ptr %q, i64 %idx + %qval = load i32, ptr %qaddr, align 4 + %add = add nsw i32 %qval, %ret.phi + %idx.next = add nuw nsw i64 %idx, 1 + %nval = load volatile i64, ptr %n, align 8 + %cmp2 = icmp slt i64 %idx.next, %nval + br i1 %cmp2, label %for.body, label %exit + +exit: + %ret = phi i32 [ %ret.phi, %for.body ], [ %add, %for.inc ] + ret i32 %ret +} + +; The control-flow before and after the load of qval shouldn't prevent postindex +; addressing from happening. +; FIXME: We choose postindex addressing, but the scevgep is placed in for.inc so +; during codegen we will fail to actually generate a postindex load. 
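(Aside on the FIXME above, as I understand it rather than as stated by the patch: SelectionDAG forms post-indexed loads one basic block at a time, so the pointer increment has to land in the same block as the load to be folded. What codegen would need versus what LSR currently emits, as IR comments:)

; if.end:                                            ; needed for folding
;   %qval = load i32, ptr %lsr.iv1, align 4
;   %scevgep = getelementptr i8, ptr %lsr.iv1, i64 4
; for.inc:                                           ; where LSR puts it today
;   %scevgep = getelementptr i8, ptr %lsr.iv1, i64 4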
+define void @middle_block_load(ptr %p, ptr %q, i64 %n) { +; CHECK-LABEL: define void @middle_block_load( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[LSR_IV2:%.*]] = phi ptr [ [[SCEVGEP3:%.*]], %[[FOR_INC:.*]] ], [ [[P]], %[[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_INC]] ], [ [[Q]], %[[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], %[[FOR_INC]] ], [ [[N]], %[[ENTRY]] ] +; CHECK-NEXT: [[PVAL:%.*]] = load i32, ptr [[LSR_IV2]], align 4 +; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[PVAL]], 0 +; CHECK-NEXT: [[SCEVGEP3]] = getelementptr i8, ptr [[LSR_IV2]], i64 4 +; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN1:.*]], label %[[IF_ELSE1:.*]] +; CHECK: [[IF_THEN1]]: +; CHECK-NEXT: tail call void @otherfn1() +; CHECK-NEXT: br label %[[IF_END:.*]] +; CHECK: [[IF_ELSE1]]: +; CHECK-NEXT: tail call void @otherfn2() +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4 +; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[QVAL]], 0 +; CHECK-NEXT: br i1 [[CMP2]], label %[[IF_THEN2:.*]], label %[[IF_ELSE2:.*]] +; CHECK: [[IF_THEN2]]: +; CHECK-NEXT: tail call void @otherfn1() +; CHECK-NEXT: br label %[[FOR_INC]] +; CHECK: [[IF_ELSE2]]: +; CHECK-NEXT: tail call void @otherfn2() +; CHECK-NEXT: br label %[[FOR_INC]] +; CHECK: [[FOR_INC]]: +; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], -1 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV1]], i64 4 +; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0 +; CHECK-NEXT: br i1 [[CMP3]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %idx = phi i64 [ %idx.next, %for.inc ], [ 0, %entry ] + %paddr = getelementptr inbounds nuw i32, ptr %p, i64 %idx + %pval = load i32, ptr %paddr, align 4 + %cmp1 = icmp sgt i32 %pval, 0 + br i1 %cmp1, label %if.then1, label %if.else1 + +if.then1: + tail call void @otherfn1() + br label %if.end + +if.else1: + tail call void @otherfn2() + br label %if.end + +if.end: + %qaddr = getelementptr inbounds nuw i32, ptr %q, i64 %idx + %qval = load i32, ptr %qaddr, align 4 + %cmp2 = icmp sgt i32 %qval, 0 + br i1 %cmp2, label %if.then2, label %if.else2 + +if.then2: + tail call void @otherfn1() + br label %for.inc + +if.else2: + tail call void @otherfn2() + br label %for.inc + +for.inc: + %idx.next = add nuw nsw i64 %idx, 1 + %cmp3 = icmp eq i64 %idx.next, %n + br i1 %cmp3, label %exit, label %for.body + +exit: + ret void +} + +declare dso_local void @otherfn1() +declare dso_local void @otherfn2() diff --git a/llvm/test/Transforms/LoopUnroll/followup.ll b/llvm/test/Transforms/LoopUnroll/followup.ll index 051e43d..9dda76e 100644 --- a/llvm/test/Transforms/LoopUnroll/followup.ll +++ b/llvm/test/Transforms/LoopUnroll/followup.ll @@ -1,9 +1,20 @@ -; RUN: opt < %s -S -passes=loop-unroll -unroll-count=2 | FileCheck %s -check-prefixes=COUNT,COMMON -; RUN: opt < %s -S -passes=loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true | FileCheck %s -check-prefixes=EPILOG,COMMON -; RUN: opt < %s -S -passes=loop-unroll -unroll-runtime=true -unroll-runtime-epilog=false | FileCheck %s -check-prefixes=PROLOG,COMMON -; -; Check that followup-attributes are applied after LoopUnroll. +; Check that followup attributes are applied after LoopUnroll. 
; +; We choose -unroll-count=3 because it produces partial unrolling of remainder +; loops. Complete unrolling would leave no remainder loop to which to copy +; followup attributes. + +; DEFINE: %{unroll} = opt < %s -S -passes=loop-unroll -unroll-count=3 +; DEFINE: %{epilog} = %{unroll} -unroll-runtime -unroll-runtime-epilog=true +; DEFINE: %{prolog} = %{unroll} -unroll-runtime -unroll-runtime-epilog=false +; DEFINE: %{fc} = FileCheck %s -check-prefixes + +; RUN: %{unroll} | %{fc} COMMON,COUNT +; RUN: %{epilog} | %{fc} COMMON,EPILOG,EPILOG-NO-UNROLL +; RUN: %{prolog} | %{fc} COMMON,PROLOG,PROLOG-NO-UNROLL +; RUN: %{epilog} -unroll-remainder | %{fc} COMMON,EPILOG,EPILOG-UNROLL +; RUN: %{prolog} -unroll-remainder | %{fc} COMMON,PROLOG,PROLOG-UNROLL + target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly { @@ -36,15 +47,17 @@ for.end: ; preds = %for.body, %entry ; COMMON-LABEL: @test( -; COUNT: br i1 %exitcond.1, label %for.end.loopexit, label %for.body, !llvm.loop ![[LOOP:[0-9]+]] +; COUNT: br i1 %exitcond.2, label %for.end.loopexit, label %for.body, !llvm.loop ![[LOOP:[0-9]+]] ; COUNT: ![[FOLLOWUP_ALL:[0-9]+]] = !{!"FollowupAll"} ; COUNT: ![[FOLLOWUP_UNROLLED:[0-9]+]] = !{!"FollowupUnrolled"} ; COUNT: ![[LOOP]] = distinct !{![[LOOP]], ![[FOLLOWUP_ALL]], ![[FOLLOWUP_UNROLLED]]} -; EPILOG: br i1 %niter.ncmp.7, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_0:[0-9]+]] -; EPILOG: br i1 %epil.iter.cmp, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !llvm.loop ![[LOOP_2:[0-9]+]] +; EPILOG: br i1 %niter.ncmp.2, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_0:[0-9]+]] +; EPILOG-NO-UNROLL: br i1 %epil.iter.cmp, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !llvm.loop ![[LOOP_2:[0-9]+]] +; EPILOG-UNROLL: br i1 %epil.iter.cmp, label %for.body.epil.1, label %for.end.loopexit.epilog-lcssa +; EPILOG-UNROLL: br i1 %epil.iter.cmp.1, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !llvm.loop ![[LOOP_2:[0-9]+]] ; EPILOG: ![[LOOP_0]] = distinct !{![[LOOP_0]], ![[FOLLOWUP_ALL:[0-9]+]], ![[FOLLOWUP_UNROLLED:[0-9]+]]} ; EPILOG: ![[FOLLOWUP_ALL]] = !{!"FollowupAll"} @@ -53,8 +66,10 @@ for.end: ; preds = %for.body, %entry ; EPILOG: ![[FOLLOWUP_REMAINDER]] = !{!"FollowupRemainder"} -; PROLOG: br i1 %prol.iter.cmp, label %for.body.prol, label %for.body.prol.loopexit.unr-lcssa, !llvm.loop ![[LOOP_0:[0-9]+]] -; PROLOG: br i1 %exitcond.7, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_2:[0-9]+]] +; PROLOG-UNROLL: br i1 %prol.iter.cmp, label %for.body.prol.1, label %for.body.prol.loopexit.unr-lcssa +; PROLOG-UNROLL: br i1 %prol.iter.cmp.1, label %for.body.prol, label %for.body.prol.loopexit.unr-lcssa, !llvm.loop ![[LOOP_0:[0-9]+]] +; PROLOG-NO-UNROLL: br i1 %prol.iter.cmp, label %for.body.prol, label %for.body.prol.loopexit.unr-lcssa, !llvm.loop ![[LOOP_0:[0-9]+]] +; PROLOG: br i1 %exitcond.2, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_2:[0-9]+]] ; PROLOG: ![[LOOP_0]] = distinct !{![[LOOP_0]], ![[FOLLOWUP_ALL:[0-9]+]], ![[FOLLOWUP_REMAINDER:[0-9]+]]} ; PROLOG: ![[FOLLOWUP_ALL]] = !{!"FollowupAll"} diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll index 905e67b..7e6b5e9 100644 --- 
a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll @@ -17,13 +17,12 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr ; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[ZEXT]], %[[LOOP_1]] ] ; CHECK-NEXT: br i1 false, label %[[LOOP_1_EXIT:.*]], label %[[LOOP_1]] ; CHECK: [[LOOP_1_EXIT]]: -; CHECK-NEXT: [[IV_1_LCSSA2:%.*]] = phi i64 [ [[IV_1]], %[[LOOP_1]] ] ; CHECK-NEXT: [[IV_1_LCSSA:%.*]] = phi i64 [ [[IV_1]], %[[LOOP_1]] ] ; CHECK-NEXT: br i1 [[C]], label %[[LOOP_2_PREHEADER:.*]], label %[[LOOP_3_PREHEADER:.*]] ; CHECK: [[LOOP_3_PREHEADER]]: ; CHECK-NEXT: br label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[IV_1_LCSSA2]], 1 +; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[IV_1]], 1 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] diff --git a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll index 62399c5..f9b5127 100644 --- a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll +++ b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll @@ -29,7 +29,6 @@ define void @test(i32 %arg, i32 %L1.limit, i32 %L2.switch, i1 %c, ptr %dst) { ; CHECK: L1.early.exit: ; CHECK-NEXT: ret void ; CHECK: L1.exit: -; CHECK-NEXT: [[INDUCTION_IV_LCSSA1:%.*]] = phi i32 [ [[INDUCTION_IV]], [[L1_BACKEDGE]] ] ; CHECK-NEXT: [[L1_EXIT_VAL:%.*]] = phi i32 [ [[L1_SUM_NEXT]], [[L1_BACKEDGE]] ] ; CHECK-NEXT: br label [[L2_HEADER:%.*]] ; CHECK: L2.header.loopexit: @@ -46,11 +45,11 @@ define void @test(i32 %arg, i32 %L1.limit, i32 %L2.switch, i1 %c, ptr %dst) { ; CHECK: vector.ph: ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[L1_EXIT_VAL]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[INDUCTION_IV_LCSSA1]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[INDUCTION_IV]], i64 0 ; CHECK-NEXT: [[DOTSPLAT1:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i32> <i32 0, i32 1, i32 2, i32 3>, [[DOTSPLAT1]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> splat (i32 1), [[TMP4]] -; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[INDUCTION_IV_LCSSA1]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[INDUCTION_IV]], 4 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP5]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/invalidate-scev-at-scope-after-vectorization.ll b/llvm/test/Transforms/LoopVectorize/invalidate-scev-at-scope-after-vectorization.ll index 1f32f89..32de44c 100644 --- a/llvm/test/Transforms/LoopVectorize/invalidate-scev-at-scope-after-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/invalidate-scev-at-scope-after-vectorization.ll @@ -31,12 +31,11 @@ define void 
@test_invalidate_scevs_at_scope(ptr %p) { ; CHECK-NEXT: [[C_1:%.*]] = icmp eq i32 [[IV_1]], 100 ; CHECK-NEXT: br i1 [[C_1]], label %[[EXIT_1:.*]], label %[[LOOP_1]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[EXIT_1]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP4]], %[[LOOP_1]] ] ; CHECK-NEXT: [[ADD_LCSSA1:%.*]] = phi i32 [ [[ADD_1]], %[[LOOP_1]] ] -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = add i32 [[DOTLCSSA]], 100 +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = add i32 [[TMP4]], 100 ; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[ADD_LCSSA]], i32 100) ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[SMAX]], -100 -; CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP3]], [[DOTLCSSA]] +; CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP3]], [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = zext i32 [[TMP5]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[TMP6]], 1 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP7]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll index eea2237..abed18a 100644 --- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll @@ -380,7 +380,6 @@ define void @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], 8589934588 -; CHECK-NEXT: [[IND_END:%.*]] = add nuw nsw i64 [[N_VEC]], [[TMP4]] ; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX5_PROMOTED]], i64 0 ; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i32, ptr [[VAR2]], i64 [[TMP4]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -396,6 +395,7 @@ define void @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi <4 x i32> [ [[TMP17]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[IND_END:%.*]] = add nuw nsw i64 [[N_VEC]], [[TMP4]] ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[DOTLCSSA]]) ; CHECK-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX5]], align 4, !alias.scope [[META27:![0-9]+]], !noalias [[META23]] ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]] diff --git a/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll index 8525b3a..3bf5c0d 100644 --- a/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll @@ -222,10 +222,9 @@ define void @pr52024(ptr %dst, i16 %N) { ; CHECK-NEXT: [[EXITCOND_2:%.*]] = icmp eq i16 [[IV_1_NEXT]], [[N]] ; CHECK-NEXT: br i1 [[EXITCOND_2]], label %[[LOOP_2_PH:.*]], label %[[LOOP_1]] ; CHECK: [[LOOP_2_PH]]: -; CHECK-NEXT: [[IV_1_LCSSA2:%.*]] = phi i16 [ [[IV_1]], %[[LOOP_1_LATCH]] ] ; CHECK-NEXT: [[IV_1_NEXT_LCSSA:%.*]] = phi i16 [ [[IV_1_NEXT]], %[[LOOP_1_LATCH]] ] ; CHECK-NEXT: [[IV_1_NEXT_EXT:%.*]] = sext i16 [[IV_1_NEXT_LCSSA]] to i64 -; CHECK-NEXT: [[TMP0:%.*]] = mul i16 [[IV_1_LCSSA2]], 3 +; CHECK-NEXT: [[TMP0:%.*]] = mul i16 [[IV_1]], 3 ; CHECK-NEXT: br label %[[LOOP_2_HEADER:.*]] ; CHECK: [[LOOP_2_HEADER]]: ; CHECK-NEXT: [[IV_1_REM:%.*]] = urem i64 100, [[IV_1_NEXT_EXT]] diff --git 
a/llvm/test/Transforms/LoopVectorize/pr45259.ll b/llvm/test/Transforms/LoopVectorize/pr45259.ll index f33437f..7a048a9 100644 --- a/llvm/test/Transforms/LoopVectorize/pr45259.ll +++ b/llvm/test/Transforms/LoopVectorize/pr45259.ll @@ -14,12 +14,10 @@ define i8 @widget(ptr %arr, i8 %t9) { ; CHECK-NEXT: [[C:%.*]] = call i1 @cond() ; CHECK-NEXT: br i1 [[C]], label [[FOR_PREHEADER:%.*]], label [[BB6]] ; CHECK: for.preheader: -; CHECK-NEXT: [[T1_0_LCSSA4:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] ; CHECK-NEXT: [[T1_0_LCSSA1:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] ; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[ARR1]] to i32 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]] -; CHECK-NEXT: [[T1_0_LCSSA3:%.*]] = ptrtoint ptr [[T1_0_LCSSA4]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[T1_0_LCSSA3]] to i32 +; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[T1_0_LCSSA2]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP3]], 4 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/pr58811-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/pr58811-scev-expansion.ll index 269c3bf..879c7ae 100644 --- a/llvm/test/Transforms/LoopVectorize/pr58811-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/pr58811-scev-expansion.ll @@ -19,11 +19,10 @@ define void @test1_pr58811() { ; CHECK-NEXT: [[INDUCTION_IV_NEXT]] = add i32 [[INDUCTION_IV]], [[TMP1]] ; CHECK-NEXT: br i1 false, label [[LOOP_1]], label [[LOOP_2_PREHEADER:%.*]] ; CHECK: loop.2.preheader: -; CHECK-NEXT: [[INDUCTION_IV_LCSSA:%.*]] = phi i32 [ [[INDUCTION_IV]], [[LOOP_1]] ] ; CHECK-NEXT: [[IV_1_LCSSA:%.*]] = phi i32 [ [[IV_1]], [[LOOP_1]] ] ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[IND_END:%.*]] = mul i32 196, [[INDUCTION_IV_LCSSA]] +; CHECK-NEXT: [[IND_END:%.*]] = mul i32 196, [[INDUCTION_IV]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -111,8 +110,8 @@ define void @test2_pr58811() { ; CHECK-NEXT: [[INDUCTION_IV_NEXT]] = add i32 [[INDUCTION_IV]], [[TMP1]] ; CHECK-NEXT: br i1 false, label [[LOOP_2]], label [[LOOP_3_PREHEADER:%.*]] ; CHECK: loop.3.preheader: -; CHECK-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], [[LOOP_2]] ] ; CHECK-NEXT: [[INDUCTION_IV_LCSSA:%.*]] = phi i32 [ [[INDUCTION_IV]], [[LOOP_2]] ] +; CHECK-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], [[LOOP_2]] ] ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[IND_END:%.*]] = mul i32 196, [[INDUCTION_IV_LCSSA]] @@ -182,12 +181,11 @@ define void @test3_pr58811() { ; CHECK-NEXT: [[ADD101:%.*]] = add i32 [[REM85]], [[P_2]] ; CHECK-NEXT: br i1 false, label [[LOOP_2]], label [[LOOP_3_PREHEADER:%.*]] ; CHECK: loop.3.preheader: -; CHECK-NEXT: [[P_2_LCSSA:%.*]] = phi i32 [ [[P_2]], [[LOOP_2]] ] ; CHECK-NEXT: [[ADD101_LCSSA:%.*]] = phi i32 [ [[ADD101]], [[LOOP_2]] ] ; CHECK-NEXT: [[TMP0:%.*]] = udiv i32 1, [[P_1]] ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[P_1]], [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], -1 -; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP2]], [[P_2_LCSSA]] +; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP2]], [[P_2]] ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[IND_END:%.*]] = mul i32 196, [[TMP3]] diff --git a/llvm/test/Transforms/LoopVectorize/pr66616.ll b/llvm/test/Transforms/LoopVectorize/pr66616.ll index 
1ef614a..1e09340 100644 --- a/llvm/test/Transforms/LoopVectorize/pr66616.ll +++ b/llvm/test/Transforms/LoopVectorize/pr66616.ll @@ -18,10 +18,9 @@ define void @pr66616(ptr %ptr) { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ] ; CHECK-NEXT: br label [[LOOP_1:%.*]] ; CHECK: preheader: -; CHECK-NEXT: [[TMP4:%.*]] = sub i32 -1, [[DOTLCSSA]] +; CHECK-NEXT: [[TMP4:%.*]] = sub i32 -1, [[TMP0]] ; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 1 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP6]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll index c270a23..faca86a 100644 --- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll @@ -205,12 +205,11 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[INDVAR_NEXT]] = add i32 [[INDVAR]], 1 ; CHECK-NEXT: br i1 [[INVAR_C]], label %[[LOOP_2_PREHEADER:.*]], label %[[LOOP_1]] ; CHECK: [[LOOP_2_PREHEADER]]: -; CHECK-NEXT: [[INDVAR_LCSSA1:%.*]] = phi i32 [ [[INDVAR]], %[[LOOP_1]] ] ; CHECK-NEXT: [[IV_1_LCSSA:%.*]] = phi i32 [ [[IV_1]], %[[LOOP_1]] ] ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[IV_1_LCSSA]], [[STEP]] ; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0) ; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[STEP]], -2 -; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[INDVAR_LCSSA1]], -1 +; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[INDVAR]], -1 ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[SMAX]], [[TMP4]] ; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP5]], i32 1) @@ -219,7 +218,8 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[STEP]], i32 1) ; CHECK-NEXT: [[TMP8:%.*]] = udiv i32 [[TMP7]], [[UMAX]] ; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP6]], [[TMP8]] -; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[INDVAR_LCSSA1]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = sub i32 2, [[STEP]] +; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[IV_1_LCSSA]], [[TMP16]] ; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP12]], i32 0) ; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[TMP3]], -1 ; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[SMAX1]], [[TMP14]] diff --git a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll index c7b2704..479d859 100644 --- a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll +++ b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll @@ -19,15 +19,14 @@ define void @test_pr63368(i1 %c, ptr %A) { ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ] ; CHECK-NEXT: br label [[EXIT_1:%.*]] ; CHECK: exit.1: -; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[DOTLCSSA]], i32 -1) +; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 -1) ; CHECK-NEXT: [[TMP2:%.*]] 
= add i32 [[SMAX1]], 2 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP2]], 4 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; CHECK: vector.scevcheck: -; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 poison, i32 -1) +; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 -1) ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[SMAX]], 1 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 ; CHECK-NEXT: [[TMP5:%.*]] = add i8 1, [[TMP4]] @@ -61,7 +60,7 @@ define void @test_pr63368(i1 %c, ptr %A) { ; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i8 [[IV_2_NEXT]] ; CHECK-NEXT: store i8 0, ptr [[GEP_A]], align 1 ; CHECK-NEXT: [[IV_2_SEXT:%.*]] = sext i8 [[IV_2]] to i32 -; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[DOTLCSSA]], [[IV_2_SEXT]] +; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[TMP0]], [[IV_2_SEXT]] ; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP_2]], label [[EXIT_2]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit.2: ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/MemCpyOpt/stack-move.ll b/llvm/test/Transforms/MemCpyOpt/stack-move.ll index 940e30e..0c2e05f 100644 --- a/llvm/test/Transforms/MemCpyOpt/stack-move.ll +++ b/llvm/test/Transforms/MemCpyOpt/stack-move.ll @@ -1729,3 +1729,61 @@ define i32 @test_ret_only_capture() { %v = load i32, ptr %a ret i32 %v } + +declare ptr @captures_address_only(ptr captures(address)) + +; Can transform: Only one address captured. +define void @test_captures_address_captures_none() { +; CHECK-LABEL: define void @test_captures_address_captures_none() { +; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 +; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 +; CHECK-NEXT: call void @captures_address_only(ptr [[SRC]]) +; CHECK-NEXT: call void @use_nocapture(ptr [[SRC]]) +; CHECK-NEXT: ret void +; + %src = alloca %struct.Foo, align 4 + %dst = alloca %struct.Foo, align 4 + store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src + call void @captures_address_only(ptr %src) + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 12, i1 false) + call void @use_nocapture(ptr %dst) + ret void +} + +; Can transform: Only one address captured. +define void @test_captures_none_and_captures_address() { +; CHECK-LABEL: define void @test_captures_none_and_captures_address() { +; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 +; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 +; CHECK-NEXT: call void @use_nocapture(ptr [[SRC]]) +; CHECK-NEXT: call void @captures_address_only(ptr [[SRC]]) +; CHECK-NEXT: ret void +; + %src = alloca %struct.Foo, align 4 + %dst = alloca %struct.Foo, align 4 + store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src + call void @use_nocapture(ptr %src) + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 12, i1 false) + call void @captures_address_only(ptr %dst) + ret void +} + +; Cannot transform: Both addresses captured. 
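(Aside, not part of the patch: why two address captures block the stack-move transform while one does not. captures(address) says only the pointer's address, not its provenance, may escape the call; if both allocas' addresses escape, a callee like the hypothetical observer below could tell whether the two slots were merged into one:)

define i1 @address_observer(ptr captures(address) %p1, ptr captures(address) %p2) {
entry:
  ; Uses only the addresses, which is exactly what captures(address) allows.
  ; Before the transform %p1 != %p2; after merging src into dst's slot the
  ; two pointers would compare equal.
  %same = icmp eq ptr %p1, %p2
  ret i1 %same
}

With only one address captured, no callee can ever see both pointers, so folding the two allocas together is unobservable.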
+define void @test_captures_address_and_captures_address() { +; CHECK-LABEL: define void @test_captures_address_and_captures_address() { +; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 +; CHECK-NEXT: [[DST:%.*]] = alloca [[STRUCT_FOO]], align 4 +; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4 +; CHECK-NEXT: call void @captures_address_only(ptr [[SRC]]) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[SRC]], i64 12, i1 false) +; CHECK-NEXT: call void @captures_address_only(ptr [[DST]]) +; CHECK-NEXT: ret void +; + %src = alloca %struct.Foo, align 4 + %dst = alloca %struct.Foo, align 4 + store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src + call void @captures_address_only(ptr %src) + call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 12, i1 false) + call void @captures_address_only(ptr %dst) + ret void +} diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll index 8d20a3b..d311f54 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll @@ -43,7 +43,6 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PREHEADER13]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], -8 -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], [[TMP0]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -64,6 +63,7 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], [[TMP0]] ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[FOR_BODY_PREHEADER13]] ; CHECK: for.body.preheader14: diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll index 2dceb27..f2ae327 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll @@ -1040,7 +1040,6 @@ define void @saxpy_5(i64 %n, float %a, ptr readonly %x, ptr noalias %y) { ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[LOOP_PREHEADER11:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775806 -; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 5 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[A]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <10 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] @@ -1058,10 +1057,11 @@ define void @saxpy_5(i64 %n, float %a, ptr readonly %x, ptr noalias %y) { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[N_VEC]], 5 ; 
CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT]], label %[[LOOP_PREHEADER11]] ; CHECK: [[LOOP_PREHEADER11]]: -; CHECK-NEXT: [[I1_PH:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[I1_PH:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0 ; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x float> [[TMP10]], <4 x float> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label %[[LOOP:.*]] diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll index a3b8736..338d925 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll @@ -9,7 +9,6 @@ define i64 @std_find_i16_constant_offset_with_assumptions(ptr %first.coerce, i16 ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[FIRST_COERCE]], i64 2) ] ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[FIRST_COERCE]], i64 256) ] -; CHECK-NEXT: [[COERCE_VAL_IP:%.*]] = getelementptr i8, ptr [[FIRST_COERCE]], i64 256 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[S]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] @@ -27,6 +26,7 @@ define i64 @std_find_i16_constant_offset_with_assumptions(ptr %first.coerce, i16 ; CHECK-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]] ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_SPLIT]]: +; CHECK-NEXT: [[COERCE_VAL_IP:%.*]] = getelementptr i8, ptr [[FIRST_COERCE]], i64 256 ; CHECK-NEXT: br i1 [[TMP2]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[RETURN:.*]] ; CHECK: [[VECTOR_EARLY_EXIT]]: ; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> [[TMP0]], i1 true) diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll index 5127b7d..7c349fb 100644 --- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll +++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll @@ -18,22 +18,15 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[WHILE_BODY_PREHEADER15:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[BLOCKSIZE]], -8 -; CHECK-NEXT: [[IND_END:%.*]] = and i32 [[BLOCKSIZE]], 7 -; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[N_VEC]], 1 -; CHECK-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[PSRCA:%.*]], i32 [[TMP0]] -; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[N_VEC]], 1 -; CHECK-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i32 [[TMP1]] -; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[N_VEC]], 1 -; CHECK-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PSRCB:%.*]], i32 [[TMP2]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1 -; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRCA]], i32 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRCA:%.*]], i32 
[[OFFSET_IDX]] ; CHECK-NEXT: [[OFFSET_IDX13:%.*]] = shl i32 [[INDEX]], 1 -; CHECK-NEXT: [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[OFFSET_IDX13]] +; CHECK-NEXT: [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i32 [[OFFSET_IDX13]] ; CHECK-NEXT: [[OFFSET_IDX15:%.*]] = shl i32 [[INDEX]], 1 -; CHECK-NEXT: [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[PSRCB]], i32 [[OFFSET_IDX15]] +; CHECK-NEXT: [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[PSRCB:%.*]], i32 [[OFFSET_IDX15]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32> ; CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load <8 x i16>, ptr [[NEXT_GEP16]], align 2 @@ -47,6 +40,13 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: +; CHECK-NEXT: [[IND_END:%.*]] = and i32 [[BLOCKSIZE]], 7 +; CHECK-NEXT: [[TMP13:%.*]] = shl i32 [[N_VEC]], 1 +; CHECK-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[PSRCA]], i32 [[TMP13]] +; CHECK-NEXT: [[TMP14:%.*]] = shl i32 [[N_VEC]], 1 +; CHECK-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[TMP14]] +; CHECK-NEXT: [[TMP12:%.*]] = shl i32 [[N_VEC]], 1 +; CHECK-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PSRCB]], i32 [[TMP12]] ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[WHILE_BODY_PREHEADER15]] ; CHECK: while.body.preheader15: diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll index dcfebe3..6e95b63 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll @@ -46,7 +46,6 @@ define dso_local void @test(ptr %start, ptr %end) #0 { ; AVX2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 124 ; AVX2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[BB12_PREHEADER11:%.*]], label [[VECTOR_PH:%.*]] ; AVX2: vector.ph: -; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[TMP3]], 24 ; AVX2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775776 ; AVX2-NEXT: br label [[VECTOR_BODY:%.*]] ; AVX2: vector.body: @@ -80,6 +79,7 @@ define dso_local void @test(ptr %start, ptr %end) #0 { ; AVX2-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; AVX2-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX2: middle.block: +; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[TMP3]], 24 ; AVX2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] ; AVX2-NEXT: br i1 [[CMP_N]], label [[EXIT]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; AVX2: vec.epilog.iter.check: @@ -90,8 +90,6 @@ define dso_local void @test(ptr %start, ptr %end) #0 { ; AVX2: vec.epilog.ph: ; AVX2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; AVX2-NEXT: [[N_VEC10:%.*]] = and i64 [[TMP3]], 9223372036854775800 -; AVX2-NEXT: [[TMP21:%.*]] = shl i64 [[N_VEC10]], 2 -; AVX2-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP21]] ; AVX2-NEXT: br label [[BB12:%.*]] ; AVX2: vec.epilog.vector.body: ; AVX2-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], 
[[BB12_PREHEADER11]] ], [ [[INDEX_NEXT16:%.*]], [[BB12]] ] @@ -106,6 +104,8 @@ define dso_local void @test(ptr %start, ptr %end) #0 { ; AVX2-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC10]] ; AVX2-NEXT: br i1 [[TMP25]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[BB12]], !llvm.loop [[LOOP4:![0-9]+]] ; AVX2: vec.epilog.middle.block: +; AVX2-NEXT: [[TMP27:%.*]] = shl i64 [[N_VEC10]], 2 +; AVX2-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP27]] ; AVX2-NEXT: [[CMP_N17:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC10]] ; AVX2-NEXT: br i1 [[CMP_N17]], label [[EXIT]], label [[BB12_PREHEADER1]] ; AVX2: bb12.preheader: diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll index bfb8554..4562072 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll @@ -16,8 +16,8 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-SAME: ptr writeonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], double [[A:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N]], 0 -; CHECK-NEXT: br i1 [[CMP1]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_END:.*]] -; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: br i1 [[CMP1]], label %[[ITER_CHECK:.*]], label %[[FOR_END:.*]] +; CHECK: [[ITER_CHECK]]: ; CHECK-NEXT: [[X4:%.*]] = ptrtoint ptr [[X]] to i64 ; CHECK-NEXT: [[Y5:%.*]] = ptrtoint ptr [[Y]] to i64 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[N]] to i64 @@ -25,12 +25,11 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[X4]], [[Y5]] ; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 128 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[MIN_ITERS_CHECK]], i1 true, i1 [[DIFF_CHECK]] -; CHECK-NEXT: br i1 [[OR_COND]], label %[[FOR_BODY_PREHEADER9:.*]], label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br i1 [[OR_COND]], label %[[FOR_BODY_PREHEADER:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; CHECK-NEXT: [[MIN_ITERS_CHECK6:%.*]] = icmp ult i32 [[N]], 16 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK6]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]] -; CHECK: [[VECTOR_PH1]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 12 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK6]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483632 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[A]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x double> [[BROADCAST_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer @@ -40,7 +39,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = fdiv fast <4 x double> splat (double 1.000000e+00), [[BROADCAST_SPLAT]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 32 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], 
i64 64 @@ -65,13 +64,14 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 12 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[WIDE_TRIP_COUNT]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER9]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER]], label %[[VEC_EPILOG_PH]], !prof [[PROF10:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_VEC11:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483644 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT14:%.*]] = insertelement <4 x double> poison, double [[A]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT15:%.*]] = shufflevector <4 x double> [[BROADCAST_SPLATINSERT14]], <4 x double> poison, <4 x i32> zeroinitializer @@ -86,12 +86,12 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: store <4 x double> [[TMP40]], ptr [[TMP41]], align 8, !tbaa [[DOUBLE_TBAA3]] ; CHECK-NEXT: [[INDEX_NEXT16]] = add nuw i64 [[INDEX12]], 4 ; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC11]] -; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N17:%.*]] = icmp eq i64 [[N_VEC11]], [[WIDE_TRIP_COUNT]] -; CHECK-NEXT: br i1 [[CMP_N17]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER9]] -; CHECK: [[FOR_BODY_PREHEADER9]]: -; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, %[[FOR_BODY_PREHEADER]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[N_VEC11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] +; CHECK-NEXT: br i1 [[CMP_N17]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, %[[ITER_CHECK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[N_VEC11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[TMP43:%.*]] = sub nsw i64 [[WIDE_TRIP_COUNT]], [[INDVARS_IV_PH]] ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP43]], 7 ; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0 @@ -110,13 +110,13 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: [[INDVARS_IV_NEXT_PROL]] = add nuw nsw i64 [[INDVARS_IV_PROL]], 1 ; CHECK-NEXT: [[PROL_ITER_NEXT]] = add i64 [[PROL_ITER]], 1 ; CHECK-NEXT: [[PROL_ITER_CMP_NOT:%.*]] = icmp eq i64 [[PROL_ITER_NEXT]], [[XTRAITER]] -; CHECK-NEXT: br i1 [[PROL_ITER_CMP_NOT]], label %[[FOR_BODY_PROL_LOOPEXIT]], label %[[FOR_BODY_PROL]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[PROL_ITER_CMP_NOT]], label %[[FOR_BODY_PROL_LOOPEXIT]], label %[[FOR_BODY_PROL]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[FOR_BODY_PROL_LOOPEXIT]]: 
-; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER9]] ], [ [[INDVARS_IV_NEXT_PROL]], %[[FOR_BODY_PROL]] ] +; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT_PROL]], %[[FOR_BODY_PROL]] ] ; CHECK-NEXT: [[TMP20:%.*]] = sub nsw i64 [[INDVARS_IV_PH]], [[WIDE_TRIP_COUNT]] ; CHECK-NEXT: [[TMP21:%.*]] = icmp ugt i64 [[TMP20]], -8 -; CHECK-NEXT: br i1 [[TMP21]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER9_NEW:.*]] -; CHECK: [[FOR_BODY_PREHEADER9_NEW]]: +; CHECK-NEXT: br i1 [[TMP21]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER_NEW:.*]] +; CHECK: [[FOR_BODY_PREHEADER_NEW]]: ; CHECK-NEXT: [[TMP22:%.*]] = fdiv fast double 1.000000e+00, [[A]] ; CHECK-NEXT: [[TMP23:%.*]] = fdiv fast double 1.000000e+00, [[A]] ; CHECK-NEXT: [[TMP24:%.*]] = fdiv fast double 1.000000e+00, [[A]] @@ -127,7 +127,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: [[TMP29:%.*]] = fdiv fast double 1.000000e+00, [[A]] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_UNR]], %[[FOR_BODY_PREHEADER9_NEW]] ], [ [[INDVARS_IV_NEXT_7:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_UNR]], %[[FOR_BODY_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_7:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[T0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[DOUBLE_TBAA3]] ; CHECK-NEXT: [[TMP30:%.*]] = fmul fast double [[T0]], [[TMP22]] @@ -177,7 +177,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: store double [[TMP37]], ptr [[ARRAYIDX2_7]], align 8, !tbaa [[DOUBLE_TBAA3]] ; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV]], 8 ; CHECK-NEXT: [[EXITCOND_NOT_7:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_7]], [[WIDE_TRIP_COUNT]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -232,8 +232,9 @@ attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"=" ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META8:![0-9]+]], [[META9:![0-9]+]]} ; CHECK: [[META8]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META8]], [[META9]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META12:![0-9]+]]} -; CHECK: [[META12]] = !{!"llvm.loop.unroll.disable"} -; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META8]]} +; CHECK: [[PROF10]] = !{!"branch_weights", i32 4, i32 12} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META8]], [[META9]]} +; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META13:![0-9]+]]} +; CHECK: [[META13]] = !{!"llvm.loop.unroll.disable"} +; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META8]]} ;. 
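A pattern repeats across the PhaseOrdering diffs above (indvars-vectorization, interleave_vec, std-find, arm_mult_q15, pr48844-br-to-switch-vectorization, vdiv): computations that produce resume values for the scalar remainder, such as IND_END and N_VEC_REMAINING, now appear in middle.block (or vec.epilog.middle.block) rather than in vector.ph. Those values are only consumed once control leaves the vector loop, so emitting them at the exit presumably keeps them off the preheader path that executes even when the vector loop covers the whole trip count. A minimal sketch of the resulting block layout, assuming a simple counted loop with VF = 8 (illustrative IR, not taken from the commit):

define void @sketch(i64 %n) {
vector.ph:
  %n.vec = and i64 %n, -8                          ; vector trip count
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  ; ... vectorized work ...
  %index.next = add nuw i64 %index, 8
  %ec = icmp eq i64 %index.next, %n.vec
  br i1 %ec, label %middle.block, label %vector.body

middle.block:
  ; the scalar resume value is now computed here, on loop exit only
  %ind.end = shl i64 %n.vec, 1
  %cmp.n = icmp eq i64 %n, %n.vec
  br i1 %cmp.n, label %exit, label %scalar.ph

scalar.ph:
  ; %ind.end would seed the remainder loop's induction phi here
  br label %exit

exit:
  ret void
}
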
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gathered-node-with-in-order-parent.ll b/llvm/test/Transforms/SLPVectorizer/X86/gathered-node-with-in-order-parent.ll new file mode 100644 index 0000000..260de1c --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/gathered-node-with-in-order-parent.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=slp-vectorizer -S -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s + +define double @test() { +; CHECK-LABEL: define double @test() { +; CHECK-NEXT: [[BB:.*]]: +; CHECK-NEXT: br label %[[BB1:.*]] +; CHECK: [[BB1]]: +; CHECK-NEXT: [[TMP0:%.*]] = phi <4 x i32> [ zeroinitializer, %[[BB]] ], [ [[TMP3:%.*]], %[[BB4:.*]] ] +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[MUL:%.*]] = mul i32 0, 1 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 5, i32 6, i32 7> +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[MUL]], i32 0 +; CHECK-NEXT: [[TMP3]] = or <4 x i32> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 1>, i32 [[MUL]], i32 0 +; CHECK-NEXT: [[TMP5:%.*]] = or <4 x i32> [[TMP0]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> [[TMP5]], i32 2 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[TMP6]], 0 +; CHECK-NEXT: br i1 false, label %[[BB7:.*]], label %[[BB1]] +; CHECK: [[BB7]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x i32> [ [[TMP5]], %[[BB4]] ] +; CHECK-NEXT: ret double 0.000000e+00 +; +bb: + br label %bb1 + +bb1: + %phi = phi i32 [ 0, %bb ], [ 0, %bb4 ] + %phi2 = phi i32 [ 0, %bb ], [ 0, %bb4 ] + %phi3 = phi i32 [ 0, %bb ], [ %or5, %bb4 ] + br label %bb4 + +bb4: + %or = or i32 %phi2, 0 + %mul = mul i32 0, 1 + %or5 = or i32 %phi3, %mul + %and = and i32 %or, 0 + %or6 = or i32 %phi2, 1 + br i1 false, label %bb7, label %bb1 + +bb7: + %phi8 = phi i32 [ %phi, %bb4 ] + %phi9 = phi i32 [ %or, %bb4 ] + %phi10 = phi i32 [ %or5, %bb4 ] + %phi11 = phi i32 [ %or6, %bb4 ] + ret double 0.000000e+00 +} + diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-profile.ll b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-profile.ll new file mode 100644 index 0000000..9cc417f --- /dev/null +++ b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-profile.ll @@ -0,0 +1,89 @@ +; RUN: split-file %s %t +; RUN: cat %t/main.ll %t/probable-or.prof > %t/probable-or.ll +; RUN: cat %t/main.ll %t/probable-and.prof > %t/probable-and.ll +; RUN: opt -passes='loop(simple-loop-unswitch<nontrivial>)' -S %t/probable-or.ll -o -| FileCheck %t/probable-or.prof +; RUN: opt -passes='loop(simple-loop-unswitch<nontrivial>)' -S %t/probable-and.ll -o -| FileCheck %t/probable-and.prof + +;--- main.ll +declare i32 @a() +declare i32 @b() + +define i32 @or(ptr %ptr, i1 %cond) !prof !0 { +entry: + br label %loop_begin + +loop_begin: + %v1 = load i1, ptr %ptr + %cond_or = or i1 %v1, %cond + br i1 %cond_or, label %loop_a, label %loop_b, !prof !1 + +loop_a: + call i32 @a() + br label %latch + +loop_b: + call i32 @b() + br label %latch + +latch: + %v2 = load i1, ptr %ptr + br i1 %v2, label %loop_begin, label %loop_exit, !prof !2 + +loop_exit: + ret i32 0 +} + +define i32 @and(ptr %ptr, i1 %cond) !prof !0 { +entry: + br label %loop_begin + +loop_begin: + %v1 = load i1, ptr %ptr + %cond_and = and i1 %v1, %cond + br i1 %cond_and, label %loop_a, 
label %loop_b, !prof !1 + +loop_a: + call i32 @a() + br label %latch + +loop_b: + call i32 @b() + br label %latch + +latch: + %v2 = load i1, ptr %ptr + br i1 %v2, label %loop_begin, label %loop_exit, !prof !2 + +loop_exit: + ret i32 0 +} + +;--- probable-or.prof +!0 = !{!"function_entry_count", i32 10} +!1 = !{!"branch_weights", i32 1, i32 1000} +!2 = !{!"branch_weights", i32 5, i32 7} +; CHECK-LABEL: @or +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond.fr = freeze i1 %cond +; CHECK-NEXT: br i1 %cond.fr, label %entry.split.us, label %entry.split, !prof !1 +; CHECK-LABEL: @and +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond.fr = freeze i1 %cond +; CHECK-NEXT: br i1 %cond.fr, label %entry.split, label %entry.split.us, !prof !3 +; CHECK: !1 = !{!"branch_weights", i32 1, i32 1000} +; CHECK: !3 = !{!"unknown", !"simple-loop-unswitch"} + +;--- probable-and.prof +!0 = !{!"function_entry_count", i32 10} +!1 = !{!"branch_weights", i32 1000, i32 1} +!2 = !{!"branch_weights", i32 5, i32 7} +; CHECK-LABEL: @or +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond.fr = freeze i1 %cond +; CHECK-NEXT: br i1 %cond.fr, label %entry.split.us, label %entry.split, !prof !1 +; CHECK-LABEL: @and +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond.fr = freeze i1 %cond +; CHECK-NEXT: br i1 %cond.fr, label %entry.split, label %entry.split.us, !prof !3 +; CHECK: !1 = !{!"unknown", !"simple-loop-unswitch"} +; CHECK: !3 = !{!"branch_weights", i32 1000, i32 1} diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll b/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll index 0964c55..3760be4 100644 --- a/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll +++ b/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals ; RUN: opt < %s -simple-loop-unswitch-inject-invariant-conditions=true -passes='loop(simple-loop-unswitch<nontrivial>,loop-instsimplify)' -S | FileCheck %s define void @test() { @@ -7,7 +7,7 @@ define void @test() { ; CHECK-NEXT: [[TMP:%.*]] = call i1 @llvm.experimental.widenable.condition() ; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr addrspace(1) poison unordered, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, ptr addrspace(1) poison unordered, align 8 -; CHECK-NEXT: br i1 [[TMP]], label [[BB_SPLIT:%.*]], label [[BB3_SPLIT_US:%.*]] +; CHECK-NEXT: br i1 [[TMP]], label [[BB_SPLIT:%.*]], label [[BB3_SPLIT_US:%.*]], !prof [[PROF0:![0-9]+]] ; CHECK: bb.split: ; CHECK-NEXT: br label [[BB3:%.*]] ; CHECK: bb3: @@ -19,7 +19,7 @@ define void @test() { ; CHECK-NEXT: [[TMP6_US:%.*]] = phi i32 [ poison, [[BB3_SPLIT_US]] ] ; CHECK-NEXT: [[TMP7_US:%.*]] = add nuw nsw i32 [[TMP6_US]], 2 ; CHECK-NEXT: [[TMP8_US:%.*]] = icmp ult i32 [[TMP7_US]], [[TMP2]] -; CHECK-NEXT: br i1 [[TMP8_US]], label [[BB9_US:%.*]], label [[BB16_SPLIT_US:%.*]], !prof [[PROF0:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8_US]], label [[BB9_US:%.*]], label [[BB16_SPLIT_US:%.*]], !prof [[PROF0]] ; CHECK: bb9.us: ; CHECK-NEXT: br label [[BB17_SPLIT_US:%.*]] ; CHECK: bb16.split.us: @@ -96,3 +96,8 @@ declare i1 @llvm.experimental.widenable.condition() !0 = !{!"branch_weights", i32 1048576, i32 1} +;. +; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(inaccessiblemem: readwrite) } +;. +; CHECK: [[PROF0]] = !{!"branch_weights", i32 1048576, i32 1} +;. 
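The unswitch profile tests above exercise how !prof metadata survives nontrivial unswitching. Reading the expectations: when the weights on the in-loop branch pin down the invariant operand (an `or` that is almost never true implies %cond is almost never true; likewise an `and` that is almost always true implies %cond is almost always true), the original branch_weights are reused on the new dispatch branch in the entry block; otherwise no sound derivation exists and the pass attaches provenance-only metadata instead. A sketch of the two !prof forms on the new entry branch, with hypothetical block names (illustrative only, not taken from the commit):

define i32 @sketch(i1 %cond) !prof !0 {
entry:
  %cond.fr = freeze i1 %cond
  br i1 %cond.fr, label %unswitched.exit, label %entry.split, !prof !1
unswitched.exit:
  ret i32 0
entry.split:
  ret i32 1
}
!0 = !{!"function_entry_count", i32 10}
; derivable case: the original weights carry over
!1 = !{!"branch_weights", i32 1, i32 1000}
; non-derivable case would instead carry:
; !1 = !{!"unknown", !"simple-loop-unswitch"}
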
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/simple-unswitch-profile.ll b/llvm/test/Transforms/SimpleLoopUnswitch/simple-unswitch-profile.ll new file mode 100644 index 0000000..ec6baa5 --- /dev/null +++ b/llvm/test/Transforms/SimpleLoopUnswitch/simple-unswitch-profile.ll @@ -0,0 +1,157 @@ +; RUN: split-file %s %t +; RUN: cat %t/main.ll %t/probable-or.prof > %t/probable-or.ll +; RUN: cat %t/main.ll %t/probable-and.prof > %t/probable-and.ll +; RUN: opt -passes='loop-mssa(simple-loop-unswitch)' -S %t/probable-or.ll -o - | FileCheck %t/probable-or.prof +; RUN: opt -passes='loop-mssa(simple-loop-unswitch)' -S %t/probable-and.ll -o - | FileCheck %t/probable-and.prof +; +; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \ +; RUN: %t/probable-or.ll -disable-output -simple-loop-unswitch-estimate-profile=0 2>&1 | FileCheck %t/probable-or.prof --check-prefixes=PROFILE-COM,PROFILE-REF + +; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \ +; RUN: %t/probable-or.ll -disable-output -simple-loop-unswitch-estimate-profile=1 2>&1 | FileCheck %t/probable-or.prof --check-prefixes=PROFILE-COM,PROFILE-CHK + +; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \ +; RUN: %t/probable-and.ll -disable-output -simple-loop-unswitch-estimate-profile=0 2>&1 | FileCheck %t/probable-and.prof --check-prefixes=PROFILE-COM,PROFILE-REF + +; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \ +; RUN: %t/probable-and.ll -disable-output -simple-loop-unswitch-estimate-profile=1 2>&1 | FileCheck %t/probable-and.prof --check-prefixes=PROFILE-COM,PROFILE-CHK + +;--- main.ll +declare void @some_func() noreturn + +define i32 @or(i1 %cond1, i32 %var1) !prof !0 { +entry: + br label %loop_begin + +loop_begin: + %var3 = phi i32 [%var1, %entry], [%var2, %do_something] + %cond2 = icmp eq i32 %var3, 10 + %cond.or = or i1 %cond1, %cond2 + br i1 %cond.or, label %loop_exit, label %do_something, !prof !1 + +do_something: + %var2 = add i32 %var3, 1 + call void @some_func() noreturn nounwind + br label %loop_begin + +loop_exit: + ret i32 0 +} + +define i32 @and(i1 %cond1, i32 %var1) !prof !0 { +entry: + br label %loop_begin + +loop_begin: + %var3 = phi i32 [%var1, %entry], [%var2, %do_something] + %cond2 = icmp eq i32 %var3, 10 + %cond.and = and i1 %cond1, %cond2 + br i1 %cond.and, label %do_something, label %loop_exit, !prof !1 + +do_something: + %var2 = add i32 %var3, 1 + call void @some_func() noreturn nounwind + br label %loop_begin + +loop_exit: + ret i32 0 +} + +;--- probable-or.prof +!0 = !{!"function_entry_count", i32 10} +!1 = !{!"branch_weights", i32 1, i32 1000} +; CHECK-LABEL: @or +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond1.fr = freeze i1 %cond1 +; CHECK-NEXT: br i1 %cond1.fr, label %loop_exit.split, label %entry.split, !prof !1 +; CHECK-LABEL: @and +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond1.fr = freeze i1 %cond1 +; CHECK-NEXT: br i1 %cond1.fr, label %entry.split, label %loop_exit.split, !prof !2 +; CHECK: !1 = !{!"branch_weights", i32 1, i32 1000} +; CHECK: !2 = !{!"unknown", !"simple-loop-unswitch"} + +; PROFILE-COM: Printing analysis results of BFI for function 'or': +; PROFILE-COM: block-frequency-info: or + ; PROFILE-COM: - entry: {{.*}} count = 10 + ; PROFILE-COM: - loop_begin: {{.*}} count = 10010 + ; PROFILE-COM: - do_something: {{.*}} count = 10000 + ; 
PROFILE-COM: - loop_exit: {{.*}} count = 10 + +; PROFILE-COM: Printing analysis results of BFI for function 'and': +; PROFILE-COM: block-frequency-info: and + ; PROFILE-COM: - entry: {{.*}} count = 10 + ; PROFILE-COM: - loop_begin: {{.*}} count = 10 + ; PROFILE-COM: - do_something: {{.*}} count = 0 + ; PROFILE-COM: - loop_exit: {{.*}} count = 10 + +; PROFILE-COM: Printing analysis results of BFI for function 'or': +; PROFILE-COM: block-frequency-info: or + ; PROFILE-COM: - entry: {{.*}} count = 10 + ; PROFILE-REF: - entry.split: {{.*}} count = 5 + ; PROFILE-CHK: - entry.split: {{.*}} count = 10 + ; PROFILE-REF: - loop_begin: {{.*}} count = 5005 + ; PROFILE-CHK: - loop_begin: {{.*}} count = 10000 + ; PROFILE-REF: - do_something: {{.*}} count = 5000 + ; PROFILE-CHK: - do_something: {{.*}} count = 9990 + ; PROFILE-REF: - loop_exit: {{.*}} count = 5 + ; PROFILE-CHK: - loop_exit: {{.*}} count = 10 + ; PROFILE-COM: - loop_exit.split: {{.*}} count = 10 + +; PROFILE-COM: Printing analysis results of BFI for function 'and': +; PROFILE-COM: block-frequency-info: and + ; PROFILE-COM: - entry: {{.*}} count = 10 + ; PROFILE-COM: - entry.split: {{.*}} count = 5 + ; PROFILE-COM: - loop_begin: {{.*}} count = 5 + ; PROFILE-COM: - do_something: {{.*}} count = 0 + ; PROFILE-COM: - loop_exit: {{.*}} count = 5 + ; PROFILE-COM: - loop_exit.split: {{.*}} count = 10 + +;--- probable-and.prof +!0 = !{!"function_entry_count", i32 10} +!1 = !{!"branch_weights", i32 1000, i32 1} +; CHECK-LABEL: @or +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond1.fr = freeze i1 %cond1 +; CHECK-NEXT: br i1 %cond1.fr, label %loop_exit.split, label %entry.split, !prof !1 +; CHECK-LABEL: @and +; CHECK-LABEL: entry: +; CHECK-NEXT: %cond1.fr = freeze i1 %cond1 +; CHECK-NEXT: br i1 %cond1.fr, label %entry.split, label %loop_exit.split, !prof !2 +; CHECK: !1 = !{!"unknown", !"simple-loop-unswitch"} +; CHECK: !2 = !{!"branch_weights", i32 1000, i32 1} +; PROFILE-COM: Printing analysis results of BFI for function 'or': +; PROFILE-COM: block-frequency-info: or + ; PROFILE-COM: - entry: {{.*}}, count = 10 + ; PROFILE-COM: - loop_begin: {{.*}}, count = 10 + ; PROFILE-COM: - do_something: {{.*}}, count = 0 + ; PROFILE-COM: - loop_exit: {{.*}}, count = 10 + +; PROFILE-COM: Printing analysis results of BFI for function 'and': +; PROFILE-COM: block-frequency-info: and + ; PROFILE-COM: - entry: {{.*}} count = 10 + ; PROFILE-COM: - loop_begin: {{.*}} count = 10010 + ; PROFILE-COM: - do_something: {{.*}} count = 10000 + ; PROFILE-COM: - loop_exit: {{.*}} count = 10 + +; PROFILE-COM: Printing analysis results of BFI for function 'or': +; PROFILE-COM: block-frequency-info: or + ; PROFILE-COM: - entry: {{.*}} count = 10 + ; PROFILE-COM: - entry.split: {{.*}} count = 5 + ; PROFILE-COM: - loop_begin: {{.*}} count = 5 + ; PROFILE-COM: - do_something: {{.*}} count = 0 + ; PROFILE-COM: - loop_exit: {{.*}} count = 5 + ; PROFILE-COM: - loop_exit.split: {{.*}} count = 10 + +; PROFILE-COM: Printing analysis results of BFI for function 'and': +; PROFILE-COM: block-frequency-info: and + ; PROFILE-COM: - entry: {{.*}} count = 10 + ; PROFILE-REF: - entry.split: {{.*}} count = 5 + ; PROFILE-CHK: - entry.split: {{.*}} count = 10 + ; PROFILE-REF: - loop_begin: {{.*}} count = 5005 + ; PROFILE-CHK: - loop_begin: {{.*}} count = 10000 + ; PROFILE-REF: - do_something: {{.*}} count = 5000 + ; PROFILE-CHK: - do_something: {{.*}} count = 9990 + ; PROFILE-REF: - loop_exit: {{.*}} count = 5 + ; PROFILE-CHK: - loop_exit: {{.*}} count = 10 + ; PROFILE-COM: - loop_exit.split: {{.*}} count 
= 10 diff --git a/llvm/test/Transforms/SimplifyCFG/X86/debugloc-switch-powers-of-two.ll b/llvm/test/Transforms/SimplifyCFG/X86/debugloc-switch-powers-of-two.ll new file mode 100644 index 0000000..a276067 --- /dev/null +++ b/llvm/test/Transforms/SimplifyCFG/X86/debugloc-switch-powers-of-two.ll @@ -0,0 +1,81 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool ./build/bin/opt --version 6 +; RUN: opt -passes='simplifycfg<switch-to-lookup>' -simplifycfg-require-and-preserve-domtree=1 -S < %s | FileCheck %s +;; As we replace the switch statement with a set of instructions that may more +;; efficiently perform the conditional check, the DILocation of the switch +;; should be propagated to all of its replacing instructions. + +target triple = "x86_64-unknown-linux-gnu" + +define i32 @switch_of_powers_two_default_reachable(i32 %arg) !dbg !5 { +; CHECK-LABEL: define i32 @switch_of_powers_two_default_reachable( +; CHECK-SAME: i32 [[ARG:%.*]]) !dbg [[DBG5:![0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctpop.i32(i32 [[ARG]]), !dbg [[DBG8:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 1, !dbg [[DBG8]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[ENTRY_SPLIT:.*]], label %[[RETURN:.*]], !dbg [[DBG8]] +; CHECK: [[ENTRY_SPLIT]]: +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.cttz.i32(i32 [[ARG]], i1 true), !dbg [[DBG8]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], 7, !dbg [[DBG8]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[SWITCH_LOOKUP:.*]], label %[[RETURN]], !dbg [[DBG8]] +; CHECK: [[SWITCH_LOOKUP]]: +; CHECK-NEXT: [[TMP4:%.*]] = zext nneg i32 [[TMP2]] to i64, !dbg [[DBG8]] +; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [7 x i32], ptr @switch.table.switch_of_powers_two_default_reachable, i64 0, i64 [[TMP4]], !dbg [[DBG8]] +; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4, !dbg [[DBG8]] +; CHECK-NEXT: br label %[[RETURN]], !dbg [[DBG8]] +; CHECK: [[RETURN]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 5, %[[ENTRY]] ], [ 5, %[[ENTRY_SPLIT]] ], [ [[SWITCH_LOAD]], %[[SWITCH_LOOKUP]] ] +; CHECK-NEXT: ret i32 [[PHI]] +; +entry: + switch i32 %arg, label %default_case [ + i32 1, label %bb1 + i32 8, label %bb2 + i32 16, label %bb3 + i32 32, label %bb4 + i32 64, label %bb5 + ], !dbg !8 + +default_case: ; preds = %entry + br label %return + +bb1: ; preds = %entry + br label %return + +bb2: ; preds = %entry + br label %return + +bb3: ; preds = %entry + br label %return + +bb4: ; preds = %entry + br label %return + +bb5: ; preds = %entry + br label %return + +return: ; preds = %bb5, %bb4, %bb3, %bb2, %bb1, %default_case + %phi = phi i32 [ 3, %bb1 ], [ 2, %bb2 ], [ 1, %bb3 ], [ 0, %bb4 ], [ 42, %bb5 ], [ 5, %default_case ] + ret i32 %phi +} + +!llvm.dbg.cu = !{!0} +!llvm.debugify = !{!2, !3} +!llvm.module.flags = !{!4} + +!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug) +!1 = !DIFile(filename: "debugloc-switch-powers-of-two.ll", directory: "/") +!2 = !{i32 9} +!3 = !{i32 1} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!5 = distinct !DISubprogram(name: "switch_of_powers_two_default_reachable", linkageName: "switch_of_powers_two_default_reachable", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !7) +!6 = !DISubroutineType(types: !7) +!7 = !{} +!8 = !DILocation(line: 1, column: 1, scope: !5) +;. 
+; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C, file: [[META1:![0-9]+]], producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug) +; CHECK: [[META1]] = !DIFile(filename: "{{.*}}debugloc-switch-powers-of-two.ll", directory: {{.*}}) +; CHECK: [[DBG5]] = distinct !DISubprogram(name: "switch_of_powers_two_default_reachable", linkageName: "switch_of_powers_two_default_reachable", scope: null, file: [[META1]], line: 1, type: [[META6:![0-9]+]], scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META7:![0-9]+]]) +; CHECK: [[META6]] = !DISubroutineType(types: [[META7]]) +; CHECK: [[META7]] = !{} +; CHECK: [[DBG8]] = !DILocation(line: 1, column: 1, scope: [[DBG5]]) +;. diff --git a/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll b/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll index 8ce94d1..98c0599 100644 --- a/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll +++ b/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll @@ -486,3 +486,119 @@ else: call void @bar() ret float %op2 } + +define void @test_switch_with_unreachable_block_as_default(i1 %c, i32 %x, ptr %ptr) { +; CHECK-LABEL: @test_switch_with_unreachable_block_as_default( +; CHECK-NEXT: br i1 [[C:%.*]], label [[SW1:%.*]], label [[SW2:%.*]] +; CHECK: sw1: +; CHECK-NEXT: switch i32 [[X:%.*]], label [[UNREACHABLE:%.*]] [ +; CHECK-NEXT: i32 1, label [[COMMON_RET:%.*]] +; CHECK-NEXT: i32 2, label [[BAR:%.*]] +; CHECK-NEXT: ] +; CHECK: sw2: +; CHECK-NEXT: store i64 42, ptr [[PTR:%.*]], align 4 +; CHECK-NEXT: br label [[COMMON_RET]] +; CHECK: common.ret: +; CHECK-NEXT: ret void +; CHECK: unreachable: +; CHECK-NEXT: unreachable +; CHECK: bar: +; CHECK-NEXT: call void @bar() +; CHECK-NEXT: br label [[COMMON_RET]] +; + br i1 %c, label %sw1, label %sw2 + +sw1: + ; This switch only exists to have an %unreachable block with multiple predecessors. + switch i32 %x, label %unreachable [ + i32 1, label %foo + i32 2, label %bar + ] + +sw2: + switch i32 %x, label %unreachable [ + i32 1, label %bb1 + i32 2, label %bb2 + i32 3, label %bb3 + ] + +bb1: + store i64 42, ptr %ptr + ret void + +bb2: + store i64 42, ptr %ptr + ret void + +bb3: + store i64 42, ptr %ptr + ret void + +unreachable: + unreachable + +foo: + ret void + +bar: + call void @bar() + ret void +} + +define void @test_switch_with_unreachable_block_as_case(i1 %c, i32 %x, ptr %ptr) { +; CHECK-LABEL: @test_switch_with_unreachable_block_as_case( +; CHECK-NEXT: br i1 [[C:%.*]], label [[SW1:%.*]], label [[SW2:%.*]] +; CHECK: sw1: +; CHECK-NEXT: switch i32 [[X:%.*]], label [[UNREACHABLE:%.*]] [ +; CHECK-NEXT: i32 1, label [[COMMON_RET:%.*]] +; CHECK-NEXT: i32 2, label [[BAR:%.*]] +; CHECK-NEXT: ] +; CHECK: sw2: +; CHECK-NEXT: store i64 42, ptr [[PTR:%.*]], align 4 +; CHECK-NEXT: br label [[COMMON_RET]] +; CHECK: common.ret: +; CHECK-NEXT: ret void +; CHECK: unreachable: +; CHECK-NEXT: unreachable +; CHECK: bar: +; CHECK-NEXT: call void @bar() +; CHECK-NEXT: br label [[COMMON_RET]] +; + br i1 %c, label %sw1, label %sw2 + +sw1: + ; This switch only exists to have an %unreachable block with multiple predecessors. 
+ switch i32 %x, label %unreachable [ + i32 1, label %foo + i32 2, label %bar + ] + +sw2: + switch i32 %x, label %bb3 [ + i32 1, label %bb1 + i32 2, label %bb2 + i32 3, label %unreachable + ] + +bb1: + store i64 42, ptr %ptr + ret void + +bb2: + store i64 42, ptr %ptr + ret void + +bb3: + store i64 42, ptr %ptr + ret void + +unreachable: + unreachable + +foo: + ret void + +bar: + call void @bar() + ret void +} diff --git a/llvm/test/Transforms/UnifyLoopExits/basic.ll b/llvm/test/Transforms/UnifyLoopExits/basic.ll index ccd15d4..d04d142 100644 --- a/llvm/test/Transforms/UnifyLoopExits/basic.ll +++ b/llvm/test/Transforms/UnifyLoopExits/basic.ll @@ -18,12 +18,12 @@ define void @loop_1(i1 %PredEntry, i1 %PredB, i1 %PredC, i1 %PredD) { ; CHECK: F: ; CHECK-NEXT: br label [[EXIT]] ; CHECK: G: -; CHECK-NEXT: br label [[F:%.*]] +; CHECK-NEXT: br label [[Y:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; CHECK: loop.exit.guard: -; CHECK-NEXT: [[GUARD_E:%.*]] = phi i1 [ true, [[B]] ], [ false, [[C]] ], [ false, [[D]] ] -; CHECK-NEXT: br i1 [[GUARD_E]], label [[E:%.*]], label [[F]] +; CHECK-NEXT: [[GUARD_X:%.*]] = phi i1 [ true, [[B]] ], [ false, [[C]] ], [ false, [[D]] ] +; CHECK-NEXT: br i1 [[GUARD_X]], label [[X:%.*]], label [[Y]] ; entry: br i1 %PredEntry, label %A, label %G @@ -53,6 +53,67 @@ exit: ret void } +define void @loop_1_callbr(i1 %PredEntry, i1 %PredB, i1 %PredC, i1 %PredD) { +; CHECK-LABEL: @loop_1_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[G:%.*]] +; CHECK: A: +; CHECK-NEXT: br label [[B:%.*]] +; CHECK: B: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]]) +; CHECK-NEXT: to label [[C:%.*]] [label %B.target.E] +; CHECK: C: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]]) +; CHECK-NEXT: to label [[D:%.*]] [label %C.target.F] +; CHECK: D: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDD:%.*]]) +; CHECK-NEXT: to label [[A]] [label %D.target.F] +; CHECK: E: +; CHECK-NEXT: br label [[EXIT:%.*]] +; CHECK: F: +; CHECK-NEXT: br label [[EXIT]] +; CHECK: G: +; CHECK-NEXT: br label [[Y:%.*]] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: B.target.E: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: C.target.F: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: D.target.F: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[GUARD_X:%.*]] = phi i1 [ true, [[B_TARGET_E:%.*]] ], [ false, [[C_TARGET_F:%.*]] ], [ false, [[D_TARGET_F:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_X]], label [[X:%.*]], label [[Y]] +; +entry: + br i1 %PredEntry, label %A, label %G + +A: + br label %B + +B: + callbr void asm "", "r,!i"(i1 %PredB) to label %C [label %E] + +C: + callbr void asm "", "r,!i"(i1 %PredC) to label %D [label %F] + +D: + callbr void asm "", "r,!i"(i1 %PredD) to label %A [label %F] + +E: + br label %exit + +F: + br label %exit + +G: + br label %F + +exit: + ret void +} + define void @loop_2(i1 %PredA, i1 %PredB, i1 %PredC) { ; CHECK-LABEL: @loop_2( ; CHECK-NEXT: entry: @@ -107,3 +168,67 @@ Z: exit: ret void } + +define void @loop_2_callbr(i1 %PredA, i1 %PredB, i1 %PredC) { +; CHECK-LABEL: @loop_2_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[A:%.*]] +; CHECK: A: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]]) +; CHECK-NEXT: to label [[B:%.*]] [label %A.target.X] +; CHECK: B: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]]) +; CHECK-NEXT: to label [[C:%.*]] [label %B.target.Y] +; CHECK: C: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]]) +; 
CHECK-NEXT: to label [[D:%.*]] [label %C.target.Z] +; CHECK: D: +; CHECK-NEXT: br label [[A]] +; CHECK: X: +; CHECK-NEXT: br label [[EXIT:%.*]] +; CHECK: Y: +; CHECK-NEXT: br label [[EXIT]] +; CHECK: Z: +; CHECK-NEXT: br label [[EXIT]] +; CHECK: exit: +; CHECK-NEXT: ret void +; CHECK: A.target.X: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: B.target.Y: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: C.target.Z: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[GUARD_X:%.*]] = phi i1 [ true, [[A_TARGET_X:%.*]] ], [ false, [[B_TARGET_Y:%.*]] ], [ false, [[C_TARGET_Z:%.*]] ] +; CHECK-NEXT: [[GUARD_Y:%.*]] = phi i1 [ false, [[A_TARGET_X]] ], [ true, [[B_TARGET_Y]] ], [ false, [[C_TARGET_Z]] ] +; CHECK-NEXT: br i1 [[GUARD_X]], label [[X:%.*]], label [[LOOP_EXIT_GUARD1:%.*]] +; CHECK: loop.exit.guard1: +; CHECK-NEXT: br i1 [[GUARD_Y]], label [[Y:%.*]], label [[Z:%.*]] +; +entry: + br label %A + +A: + callbr void asm "", "r,!i"(i1 %PredA) to label %B [label %X] + +B: + callbr void asm "", "r,!i"(i1 %PredB) to label %C [label %Y] + +C: + callbr void asm "", "r,!i"(i1 %PredC) to label %D [label %Z] + +D: + br label %A + +X: + br label %exit + +Y: + br label %exit + +Z: + br label %exit + +exit: + ret void +} diff --git a/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll b/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll index f55639f..be982d5 100644 --- a/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll +++ b/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll @@ -71,6 +71,85 @@ E: ret void } +define void @loop_two_exits_callbr(i1 %PredEntry, i1 %PredA) { +; CHECK-LABEL: @loop_two_exits_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[E:%.*]] +; CHECK: A: +; CHECK-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[C:%.*]] ] +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]]) +; CHECK-NEXT: to label [[A_TARGET_B:%.*]] [label %C] +; CHECK: B: +; CHECK-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]] +; CHECK-NEXT: br label [[D:%.*]] +; CHECK: C: +; CHECK-NEXT: [[INC2]] = add i32 [[INC1]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; CHECK-NEXT: to label [[A]] [label %C.target.E] +; CHECK: D: +; CHECK-NEXT: unreachable +; CHECK: E: +; CHECK-NEXT: ret void +; CHECK: A.target.B: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: C.target.E: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[MERGED_BB_IDX:%.*]] = phi i32 [ 0, [[A_TARGET_B]] ], [ 1, [[C_TARGET_E:%.*]] ] +; CHECK-NEXT: [[B_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 0 +; CHECK-NEXT: br i1 [[B_PREDICATE]], label [[B:%.*]], label [[E]] +; +; BOOLEAN-LABEL: @loop_two_exits_callbr( +; BOOLEAN-NEXT: entry: +; BOOLEAN-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[E:%.*]] +; BOOLEAN: A: +; BOOLEAN-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[C:%.*]] ] +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]]) +; BOOLEAN-NEXT: to label [[A_TARGET_B:%.*]] [label %C] +; BOOLEAN: B: +; BOOLEAN-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]] +; BOOLEAN-NEXT: br label [[D:%.*]] +; BOOLEAN: C: +; BOOLEAN-NEXT: [[INC2]] = add i32 [[INC1]], 1 +; BOOLEAN-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10 +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; BOOLEAN-NEXT: to label [[A]] [label %C.target.E] +; BOOLEAN: D: +; BOOLEAN-NEXT: unreachable +; 
BOOLEAN: E: +; BOOLEAN-NEXT: ret void +; BOOLEAN: A.target.B: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; BOOLEAN: C.target.E: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]] +; BOOLEAN: loop.exit.guard: +; BOOLEAN-NEXT: [[GUARD_B:%.*]] = phi i1 [ true, [[A_TARGET_B]] ], [ false, [[C_TARGET_E:%.*]] ] +; BOOLEAN-NEXT: br i1 [[GUARD_B]], label [[B:%.*]], label [[E]] +; +entry: + br i1 %PredEntry, label %A, label %E + +A: + %inc1 = phi i32 [ 0, %entry ], [ %inc2, %C ] + callbr void asm "", "r,!i"(i1 %PredA) to label %B [label %C] + +B: + tail call fastcc void @check(i32 1) #0 + br label %D + +C: + %inc2 = add i32 %inc1, 1 + %cmp = icmp ult i32 %inc2, 10 + callbr void asm "","r,!i"(i1 %cmp) to label %A [label %E] + +D: + unreachable + +E: + ret void +} + ; The loop exit blocks appear in an inner loop. define void @inner_loop(i1 %PredEntry, i1 %PredA, i1 %PredB) { @@ -196,6 +275,164 @@ I: ret void } +define void @inner_loop_callbr(i1 %PredEntry, i1 %PredA, i1 %PredB) { +; CHECK-LABEL: @inner_loop_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[I:%.*]] +; CHECK: A: +; CHECK-NEXT: [[OUTER1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OUTER2:%.*]], [[G:%.*]] ] +; CHECK-NEXT: br label [[B:%.*]] +; CHECK: B: +; CHECK-NEXT: [[INNER1:%.*]] = phi i32 [ 0, [[A]] ], [ [[INNER2:%.*]], [[F:%.*]] ] +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]]) +; CHECK-NEXT: to label [[D:%.*]] [label %B.target.B.target.C] +; CHECK: C: +; CHECK-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]] +; CHECK-NEXT: br label [[H:%.*]] +; CHECK: D: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]]) +; CHECK-NEXT: to label [[D_TARGET_D_TARGET_E:%.*]] [label %F] +; CHECK: E: +; CHECK-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]] +; CHECK-NEXT: br label [[H]] +; CHECK: F: +; CHECK-NEXT: [[INNER2]] = add i32 [[INNER1]], 1 +; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 [[INNER2]], 20 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]]) +; CHECK-NEXT: to label [[B]] [label %F.target.G] +; CHECK: G: +; CHECK-NEXT: [[OUTER2]] = add i32 [[OUTER1]], 1 +; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[OUTER2]], 10 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP2]]) +; CHECK-NEXT: to label [[A]] [label %G.target.I] +; CHECK: H: +; CHECK-NEXT: unreachable +; CHECK: I: +; CHECK-NEXT: ret void +; CHECK: B.target.C: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: D.target.E: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: G.target.I: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[MERGED_BB_IDX:%.*]] = phi i32 [ 0, [[B_TARGET_C:%.*]] ], [ 1, [[D_TARGET_E:%.*]] ], [ 2, [[G_TARGET_I:%.*]] ] +; CHECK-NEXT: [[C_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 0 +; CHECK-NEXT: br i1 [[C_PREDICATE]], label [[C:%.*]], label [[LOOP_EXIT_GUARD1:%.*]] +; CHECK: loop.exit.guard1: +; CHECK-NEXT: [[E_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 1 +; CHECK-NEXT: br i1 [[E_PREDICATE]], label [[E:%.*]], label [[I]] +; CHECK: B.target.B.target.C: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD2:%.*]] +; CHECK: D.target.D.target.E: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD2]] +; CHECK: F.target.G: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD2]] +; CHECK: loop.exit.guard2: +; CHECK-NEXT: [[MERGED_BB_IDX4:%.*]] = phi i32 [ 0, [[B_TARGET_B_TARGET_C:%.*]] ], [ 1, [[D_TARGET_D_TARGET_E]] ], [ 2, [[F_TARGET_G:%.*]] ] +; CHECK-NEXT: [[B_TARGET_C_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX4]], 0 +; CHECK-NEXT: br i1 [[B_TARGET_C_PREDICATE]], 
label [[B_TARGET_C]], label [[LOOP_EXIT_GUARD3:%.*]] +; CHECK: loop.exit.guard3: +; CHECK-NEXT: [[D_TARGET_E_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX4]], 1 +; CHECK-NEXT: br i1 [[D_TARGET_E_PREDICATE]], label [[D_TARGET_E]], label [[G]] +; +; BOOLEAN-LABEL: @inner_loop_callbr( +; BOOLEAN-NEXT: entry: +; BOOLEAN-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[I:%.*]] +; BOOLEAN: A: +; BOOLEAN-NEXT: [[OUTER1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OUTER2:%.*]], [[G:%.*]] ] +; BOOLEAN-NEXT: br label [[B:%.*]] +; BOOLEAN: B: +; BOOLEAN-NEXT: [[INNER1:%.*]] = phi i32 [ 0, [[A]] ], [ [[INNER2:%.*]], [[F:%.*]] ] +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]]) +; BOOLEAN-NEXT: to label [[D:%.*]] [label %B.target.B.target.C] +; BOOLEAN: C: +; BOOLEAN-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]] +; BOOLEAN-NEXT: br label [[H:%.*]] +; BOOLEAN: D: +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]]) +; BOOLEAN-NEXT: to label [[D_TARGET_D_TARGET_E:%.*]] [label %F] +; BOOLEAN: E: +; BOOLEAN-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]] +; BOOLEAN-NEXT: br label [[H]] +; BOOLEAN: F: +; BOOLEAN-NEXT: [[INNER2]] = add i32 [[INNER1]], 1 +; BOOLEAN-NEXT: [[CMP1:%.*]] = icmp ult i32 [[INNER2]], 20 +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]]) +; BOOLEAN-NEXT: to label [[B]] [label %F.target.G] +; BOOLEAN: G: +; BOOLEAN-NEXT: [[OUTER2]] = add i32 [[OUTER1]], 1 +; BOOLEAN-NEXT: [[CMP2:%.*]] = icmp ult i32 [[OUTER2]], 10 +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP2]]) +; BOOLEAN-NEXT: to label [[A]] [label %G.target.I] +; BOOLEAN: H: +; BOOLEAN-NEXT: unreachable +; BOOLEAN: I: +; BOOLEAN-NEXT: ret void +; BOOLEAN: B.target.C: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; BOOLEAN: D.target.E: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]] +; BOOLEAN: G.target.I: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]] +; BOOLEAN: loop.exit.guard: +; BOOLEAN-NEXT: [[GUARD_C:%.*]] = phi i1 [ true, [[B_TARGET_C:%.*]] ], [ false, [[D_TARGET_E:%.*]] ], [ false, [[G_TARGET_I:%.*]] ] +; BOOLEAN-NEXT: [[GUARD_E:%.*]] = phi i1 [ false, [[B_TARGET_C]] ], [ true, [[D_TARGET_E]] ], [ false, [[G_TARGET_I]] ] +; BOOLEAN-NEXT: br i1 [[GUARD_C]], label [[C:%.*]], label [[LOOP_EXIT_GUARD1:%.*]] +; BOOLEAN: loop.exit.guard1: +; BOOLEAN-NEXT: br i1 [[GUARD_E]], label [[E:%.*]], label [[I]] +; BOOLEAN: B.target.B.target.C: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD2:%.*]] +; BOOLEAN: D.target.D.target.E: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD2]] +; BOOLEAN: F.target.G: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD2]] +; BOOLEAN: loop.exit.guard2: +; BOOLEAN-NEXT: [[GUARD_B_TARGET_C:%.*]] = phi i1 [ true, [[B_TARGET_B_TARGET_C:%.*]] ], [ false, [[D_TARGET_D_TARGET_E]] ], [ false, [[F_TARGET_G:%.*]] ] +; BOOLEAN-NEXT: [[GUARD_D_TARGET_E:%.*]] = phi i1 [ false, [[B_TARGET_B_TARGET_C]] ], [ true, [[D_TARGET_D_TARGET_E]] ], [ false, [[F_TARGET_G]] ] +; BOOLEAN-NEXT: br i1 [[GUARD_B_TARGET_C]], label [[B_TARGET_C]], label [[LOOP_EXIT_GUARD3:%.*]] +; BOOLEAN: loop.exit.guard3: +; BOOLEAN-NEXT: br i1 [[GUARD_D_TARGET_E]], label [[D_TARGET_E]], label [[G]] +; +entry: + br i1 %PredEntry, label %A, label %I + +A: + %outer1 = phi i32 [ 0, %entry ], [ %outer2, %G ] + br label %B + +B: + %inner1 = phi i32 [ 0, %A ], [ %inner2, %F ] + callbr void asm "", "r,!i"(i1 %PredA) to label %D [label %C] + +C: + tail call fastcc void @check(i32 1) #0 + br label %H + +D: + callbr void asm "", "r,!i"(i1 %PredB) to label %E [label %F] + +E: + tail call fastcc void @check(i32 2) #0 + br 
label %H + +F: + %inner2 = add i32 %inner1, 1 + %cmp1 = icmp ult i32 %inner2, 20 + callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %G] + +G: + %outer2 = add i32 %outer1, 1 + %cmp2 = icmp ult i32 %outer2, 10 + callbr void asm "", "r,!i"(i1 %cmp2) to label %A [label %I] + +H: + unreachable + +I: + ret void +} + ; A loop with more exit blocks. define void @loop_five_exits(i1 %PredEntry, i1 %PredA, i1 %PredB, i1 %PredC, i1 %PredD) { @@ -341,6 +578,179 @@ L: ret void } +define void @loop_five_exits_callbr(i1 %PredEntry, i1 %PredA, i1 %PredB, i1 %PredC, i1 %PredD) { +; CHECK-LABEL: @loop_five_exits_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[L:%.*]] +; CHECK: A: +; CHECK-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[I:%.*]] ] +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]]) +; CHECK-NEXT: to label [[A_TARGET_B:%.*]] [label %C] +; CHECK: B: +; CHECK-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]] +; CHECK-NEXT: br label [[J:%.*]] +; CHECK: C: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]]) +; CHECK-NEXT: to label [[C_TARGET_D:%.*]] [label %E] +; CHECK: D: +; CHECK-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]] +; CHECK-NEXT: br label [[J]] +; CHECK: E: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]]) +; CHECK-NEXT: to label [[E_TARGET_F:%.*]] [label %G] +; CHECK: F: +; CHECK-NEXT: tail call fastcc void @check(i32 3) #[[ATTR0]] +; CHECK-NEXT: br label [[K:%.*]] +; CHECK: G: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDD:%.*]]) +; CHECK-NEXT: to label [[G_TARGET_H:%.*]] [label %I] +; CHECK: H: +; CHECK-NEXT: tail call fastcc void @check(i32 4) #[[ATTR0]] +; CHECK-NEXT: br label [[K]] +; CHECK: I: +; CHECK-NEXT: [[INC2]] = add i32 [[INC1]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; CHECK-NEXT: to label [[A]] [label %I.target.L] +; CHECK: J: +; CHECK-NEXT: br label [[L]] +; CHECK: K: +; CHECK-NEXT: br label [[L]] +; CHECK: L: +; CHECK-NEXT: ret void +; CHECK: A.target.B: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: C.target.D: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: E.target.F: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: G.target.H: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: I.target.L: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[MERGED_BB_IDX:%.*]] = phi i32 [ 0, [[A_TARGET_B]] ], [ 1, [[C_TARGET_D]] ], [ 2, [[E_TARGET_F]] ], [ 3, [[G_TARGET_H]] ], [ 4, [[I_TARGET_L:%.*]] ] +; CHECK-NEXT: [[B_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 0 +; CHECK-NEXT: br i1 [[B_PREDICATE]], label [[B:%.*]], label [[LOOP_EXIT_GUARD1:%.*]] +; CHECK: loop.exit.guard1: +; CHECK-NEXT: [[D_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 1 +; CHECK-NEXT: br i1 [[D_PREDICATE]], label [[D:%.*]], label [[LOOP_EXIT_GUARD2:%.*]] +; CHECK: loop.exit.guard2: +; CHECK-NEXT: [[F_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 2 +; CHECK-NEXT: br i1 [[F_PREDICATE]], label [[F:%.*]], label [[LOOP_EXIT_GUARD3:%.*]] +; CHECK: loop.exit.guard3: +; CHECK-NEXT: [[H_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 3 +; CHECK-NEXT: br i1 [[H_PREDICATE]], label [[H:%.*]], label [[L]] +; +; BOOLEAN-LABEL: @loop_five_exits_callbr( +; BOOLEAN-NEXT: entry: +; BOOLEAN-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[L:%.*]] +; BOOLEAN: A: +; BOOLEAN-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[I:%.*]] ] +; 
BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]]) +; BOOLEAN-NEXT: to label [[A_TARGET_B:%.*]] [label %C] +; BOOLEAN: B: +; BOOLEAN-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]] +; BOOLEAN-NEXT: br label [[J:%.*]] +; BOOLEAN: C: +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]]) +; BOOLEAN-NEXT: to label [[C_TARGET_D:%.*]] [label %E] +; BOOLEAN: D: +; BOOLEAN-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]] +; BOOLEAN-NEXT: br label [[J]] +; BOOLEAN: E: +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]]) +; BOOLEAN-NEXT: to label [[E_TARGET_F:%.*]] [label %G] +; BOOLEAN: F: +; BOOLEAN-NEXT: tail call fastcc void @check(i32 3) #[[ATTR0]] +; BOOLEAN-NEXT: br label [[K:%.*]] +; BOOLEAN: G: +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDD:%.*]]) +; BOOLEAN-NEXT: to label [[G_TARGET_H:%.*]] [label %I] +; BOOLEAN: H: +; BOOLEAN-NEXT: tail call fastcc void @check(i32 4) #[[ATTR0]] +; BOOLEAN-NEXT: br label [[K]] +; BOOLEAN: I: +; BOOLEAN-NEXT: [[INC2]] = add i32 [[INC1]], 1 +; BOOLEAN-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10 +; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; BOOLEAN-NEXT: to label [[A]] [label %I.target.L] +; BOOLEAN: J: +; BOOLEAN-NEXT: br label [[L]] +; BOOLEAN: K: +; BOOLEAN-NEXT: br label [[L]] +; BOOLEAN: L: +; BOOLEAN-NEXT: ret void +; BOOLEAN: A.target.B: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; BOOLEAN: C.target.D: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]] +; BOOLEAN: E.target.F: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]] +; BOOLEAN: G.target.H: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]] +; BOOLEAN: I.target.L: +; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]] +; BOOLEAN: loop.exit.guard: +; BOOLEAN-NEXT: [[GUARD_B:%.*]] = phi i1 [ true, [[A_TARGET_B]] ], [ false, [[C_TARGET_D]] ], [ false, [[E_TARGET_F]] ], [ false, [[G_TARGET_H]] ], [ false, [[I_TARGET_L:%.*]] ] +; BOOLEAN-NEXT: [[GUARD_D:%.*]] = phi i1 [ false, [[A_TARGET_B]] ], [ true, [[C_TARGET_D]] ], [ false, [[E_TARGET_F]] ], [ false, [[G_TARGET_H]] ], [ false, [[I_TARGET_L]] ] +; BOOLEAN-NEXT: [[GUARD_F:%.*]] = phi i1 [ false, [[A_TARGET_B]] ], [ false, [[C_TARGET_D]] ], [ true, [[E_TARGET_F]] ], [ false, [[G_TARGET_H]] ], [ false, [[I_TARGET_L]] ] +; BOOLEAN-NEXT: [[GUARD_H:%.*]] = phi i1 [ false, [[A_TARGET_B]] ], [ false, [[C_TARGET_D]] ], [ false, [[E_TARGET_F]] ], [ true, [[G_TARGET_H]] ], [ false, [[I_TARGET_L]] ] +; BOOLEAN-NEXT: br i1 [[GUARD_B]], label [[B:%.*]], label [[LOOP_EXIT_GUARD1:%.*]] +; BOOLEAN: loop.exit.guard1: +; BOOLEAN-NEXT: br i1 [[GUARD_D]], label [[D:%.*]], label [[LOOP_EXIT_GUARD2:%.*]] +; BOOLEAN: loop.exit.guard2: +; BOOLEAN-NEXT: br i1 [[GUARD_F]], label [[F:%.*]], label [[LOOP_EXIT_GUARD3:%.*]] +; BOOLEAN: loop.exit.guard3: +; BOOLEAN-NEXT: br i1 [[GUARD_H]], label [[H:%.*]], label [[L]] +; +entry: + br i1 %PredEntry, label %A, label %L + +A: + %inc1 = phi i32 [ 0, %entry ], [ %inc2, %I ] + callbr void asm "", "r,!i"(i1 %PredA) to label %B [label %C] + +B: + tail call fastcc void @check(i32 1) #0 + br label %J + +C: + callbr void asm "", "r,!i"(i1 %PredB) to label %D [label %E] + +D: + tail call fastcc void @check(i32 2) #0 + br label %J + +E: + callbr void asm "", "r,!i"(i1 %PredC) to label %F [label %G] + +F: + tail call fastcc void @check(i32 3) #0 + br label %K + +G: + callbr void asm "", "r,!i"(i1 %PredD) to label %H [label %I] + +H: + tail call fastcc void @check(i32 4) #0 + br label %K + +I: + %inc2 = add i32 %inc1, 1 + %cmp = icmp ult i32 %inc2, 10 + callbr void asm "", "r,!i"(i1 %cmp) to label %A 
[label %L]
+
+J:
+  br label %L
+
+K:
+  br label %L
+
+L:
+  ret void
+}
+
 declare void @check(i32 noundef %i) #0
diff --git a/llvm/test/Transforms/UnifyLoopExits/nested.ll b/llvm/test/Transforms/UnifyLoopExits/nested.ll
index 8fae2c4..2ec576a 100644
--- a/llvm/test/Transforms/UnifyLoopExits/nested.ll
+++ b/llvm/test/Transforms/UnifyLoopExits/nested.ll
@@ -78,3 +78,145 @@ exit:
 %exit.phi = phi i32 [%A4.phi, %A5], [%Z, %C]
 ret void
 }
+
+define void @nested_callbr(i1 %PredB3, i1 %PredB4, i1 %PredA4, i1 %PredA3, i32 %X, i32 %Y, i32 %Z) {
+; CHECK-LABEL: @nested_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[A1:%.*]]
+; CHECK: A1:
+; CHECK-NEXT: br label [[B1:%.*]]
+; CHECK: B1:
+; CHECK-NEXT: br label [[B2:%.*]]
+; CHECK: B2:
+; CHECK-NEXT: [[X_INC:%.*]] = add i32 [[X:%.*]], 1
+; CHECK-NEXT: br label [[B3:%.*]]
+; CHECK: B3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB3:%.*]])
+; CHECK-NEXT: to label [[B4:%.*]] [label %B3.target.A3]
+; CHECK: B4:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB4:%.*]])
+; CHECK-NEXT: to label [[B1]] [label %B4.target.A2]
+; CHECK: A2:
+; CHECK-NEXT: br label [[A4:%.*]]
+; CHECK: A3:
+; CHECK-NEXT: br label [[A4]]
+; CHECK: A4:
+; CHECK-NEXT: [[A4_PHI:%.*]] = phi i32 [ [[Y:%.*]], [[A3:%.*]] ], [ [[X_INC_MOVED:%.*]], [[A2:%.*]] ]
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA4:%.*]])
+; CHECK-NEXT: to label [[A4_TARGET_C:%.*]] [label %A5]
+; CHECK: A5:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA3:%.*]])
+; CHECK-NEXT: to label [[A5_TARGET_EXIT:%.*]] [label %A1]
+; CHECK: C:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[EXIT_PHI:%.*]] = phi i32 [ [[Z:%.*]], [[C:%.*]] ], [ [[EXIT_PHI_MOVED:%.*]], [[LOOP_EXIT_GUARD:%.*]] ]
+; CHECK-NEXT: ret void
+; CHECK: A4.target.C:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: A5.target.exit:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[EXIT_PHI_MOVED]] = phi i32 [ poison, [[A4_TARGET_C]] ], [ [[A4_PHI]], [[A5_TARGET_EXIT]] ]
+; CHECK-NEXT: [[GUARD_C:%.*]] = phi i1 [ true, [[A4_TARGET_C]] ], [ false, [[A5_TARGET_EXIT]] ]
+; CHECK-NEXT: br i1 [[GUARD_C]], label [[C]], label [[EXIT]]
+; CHECK: B3.target.A3:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1:%.*]]
+; CHECK: B4.target.A2:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1]]
+; CHECK: loop.exit.guard1:
+; CHECK-NEXT: [[X_INC_MOVED]] = phi i32 [ [[X_INC]], [[B3_TARGET_A3:%.*]] ], [ [[X_INC]], [[B4_TARGET_A2:%.*]] ]
+; CHECK-NEXT: [[GUARD_A3:%.*]] = phi i1 [ true, [[B3_TARGET_A3]] ], [ false, [[B4_TARGET_A2]] ]
+; CHECK-NEXT: br i1 [[GUARD_A3]], label [[A3]], label [[A2]]
+;
+entry:
+  br label %A1
+
+A1:
+  br label %B1
+
+B1:
+  br label %B2
+
+B2:
+  %X.inc = add i32 %X, 1
+  br label %B3
+
+B3:
+  callbr void asm "", "r,!i"(i1 %PredB3) to label %B4 [label %A3]
+
+B4:
+  callbr void asm "", "r,!i"(i1 %PredB4) to label %B1 [label %A2]
+
+A2:
+  br label %A4
+
+A3:
+  br label %A4
+
+A4:
+  %A4.phi = phi i32 [%Y, %A3], [%X.inc, %A2]
+  callbr void asm "", "r,!i"(i1 %PredA4) to label %C [label %A5]
+
+A5:
+  callbr void asm "", "r,!i"(i1 %PredA3) to label %exit [label %A1]
+
+C:
+  br label %exit
+
+exit:
+  %exit.phi = phi i32 [%A4.phi, %A5], [%Z, %C]
+  ret void
+}
+
+; Here, the newly created target block that connects b to r1 needs to be part of
+; the parent loop (the outer loop b participates in). Otherwise, it will be
+; regarded as an additional loop entry point to this outer loop.
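+;
+; A rough C-style sketch of the control flow below (an illustrative reading of
+; the test, not part of it; cond() is a stand-in for the opaque callbr
+; conditions):
+;
+;   a: goto b;
+;   b: if (cond()) goto c; else goto r1;  // inner-loop header; b->r1 leaves both loops
+;   c: if (cond()) goto e; else goto b;   // inner backedge c->b
+;   e: if (cond()) goto a; else goto r2;  // outer backedge e->a
+;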
+define void @nested_callbr_multiple_exits() { +; CHECK-LABEL: @nested_callbr_multiple_exits( +; CHECK-NEXT: br label [[A:%.*]] +; CHECK: a: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[B:%.*]] [] +; CHECK: b: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label [[C:%.*]] [label %b.target.b.target.r1] +; CHECK: c: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label [[C_TARGET_E:%.*]] [label %b] +; CHECK: e: +; CHECK-NEXT: callbr void asm "", "!i"() +; CHECK-NEXT: to label [[A]] [label %e.target.r2] +; CHECK: r1: +; CHECK-NEXT: ret void +; CHECK: r2: +; CHECK-NEXT: ret void +; CHECK: b.target.r1: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: e.target.r2: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[GUARD_R1:%.*]] = phi i1 [ true, [[B_TARGET_R1:%.*]] ], [ false, [[E_TARGET_R2:%.*]] ] +; CHECK-NEXT: br i1 [[GUARD_R1]], label [[R1:%.*]], label [[R2:%.*]] +; CHECK: b.target.b.target.r1: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1:%.*]] +; CHECK: c.target.e: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1]] +; CHECK: loop.exit.guard1: +; CHECK-NEXT: [[GUARD_B_TARGET_R1:%.*]] = phi i1 [ true, [[B_TARGET_B_TARGET_R1:%.*]] ], [ false, [[C_TARGET_E]] ] +; CHECK-NEXT: br i1 [[GUARD_B_TARGET_R1]], label [[B_TARGET_R1]], label [[E:%.*]] +; + br label %a +a: + callbr void asm "", ""() to label %b [] +b: + callbr void asm "", "!i"() to label %c [label %r1] +c: + callbr void asm "", "!i"() to label %e [label %b] +e: + callbr void asm "", "!i"() to label %a [label %r2] +r1: + ret void +r2: + ret void +} diff --git a/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll b/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll index 3e68df3..ffe8026 100644 --- a/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll +++ b/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll @@ -57,6 +57,60 @@ return: ret i32 %phi } +define i32 @exiting-used-in-exit_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 { +; CHECK-LABEL: @exiting-used-in-exit_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A:%.*]] [] +; CHECK: A: +; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4 +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]]) +; CHECK-NEXT: to label [[B:%.*]] [label %A.target.return] +; CHECK: B: +; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MYTMP41]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; CHECK-NEXT: to label [[A]] [label %B.target.C] +; CHECK: C: +; CHECK-NEXT: [[INC:%.*]] = add i32 [[MYTMP41_MOVED:%.*]], 1 +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[RETURN:%.*]] [] +; CHECK: return: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[INC]], [[C:%.*]] ], [ [[PHI_MOVED:%.*]], [[LOOP_EXIT_GUARD:%.*]] ] +; CHECK-NEXT: ret i32 [[PHI]] +; CHECK: A.target.return: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: B.target.C: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_RETURN:%.*]] ], [ [[MYTMP41]], [[B_TARGET_C:%.*]] ] +; CHECK-NEXT: [[PHI_MOVED]] = phi i32 [ [[MYTMP42]], [[A_TARGET_RETURN]] ], [ poison, [[B_TARGET_C]] ] +; CHECK-NEXT: [[GUARD_RETURN:%.*]] = phi i1 [ true, [[A_TARGET_RETURN]] ], [ false, [[B_TARGET_C]] ] +; CHECK-NEXT: br i1 [[GUARD_RETURN]], label [[RETURN]], label [[C]] +; +entry: + callbr void asm "", ""() 
to label %A [] + +A: + %mytmp42 = load i32, ptr %arg1, align 4 + %cmp1 = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %return] + +B: + %mytmp41 = load i32, ptr %arg2, align 4 + %cmp = icmp slt i32 %mytmp41, 0 + callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %C] + +C: + %inc = add i32 %mytmp41, 1 + callbr void asm "", ""() to label %return [] + +return: + %phi = phi i32 [ %inc, %C ], [ %mytmp42, %A ] + ret i32 %phi +} + ; Loop consists of A, B and C: ; - A is the header ; - A and C are exiting blocks @@ -112,6 +166,63 @@ return: ret i32 0 } +define i32 @internal-used-in-exit_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 { +; CHECK-LABEL: @internal-used-in-exit_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4 +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A:%.*]] [] +; CHECK: A: +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]]) +; CHECK-NEXT: to label [[B:%.*]] [label %A.target.return] +; CHECK: B: +; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4 +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[C:%.*]] [] +; CHECK: C: +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; CHECK-NEXT: to label [[A]] [label %C.target.D] +; CHECK: D: +; CHECK-NEXT: [[INC:%.*]] = add i32 [[MYTMP41_MOVED:%.*]], 1 +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[RETURN:%.*]] [] +; CHECK: return: +; CHECK-NEXT: ret i32 0 +; CHECK: A.target.return: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: C.target.D: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_RETURN:%.*]] ], [ [[MYTMP41]], [[C_TARGET_D:%.*]] ] +; CHECK-NEXT: [[GUARD_RETURN:%.*]] = phi i1 [ true, [[A_TARGET_RETURN]] ], [ false, [[C_TARGET_D]] ] +; CHECK-NEXT: br i1 [[GUARD_RETURN]], label [[RETURN]], label [[D:%.*]] +; +entry: + %mytmp42 = load i32, ptr %arg1, align 4 + callbr void asm "", ""() to label %A [] + +A: + %cmp1 = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %return] + +B: + %mytmp41 = load i32, ptr %arg2, align 4 + callbr void asm "", ""() to label %C [] + +C: + %cmp = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %D] + +D: + %inc = add i32 %mytmp41, 1 + callbr void asm "", ""() to label %return [] + +return: + ret i32 0 +} + ; Loop consists of A, B and C: ; - A is the header ; - A and C are exiting blocks @@ -172,6 +283,68 @@ return: ret i32 %phi } +define i32 @mixed-use-in-exit_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 { +; CHECK-LABEL: @mixed-use-in-exit_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4 +; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP2]]) +; CHECK-NEXT: to label [[A:%.*]] [label %return] +; CHECK: A: +; CHECK-NEXT: [[MYTMP43:%.*]] = add i32 [[MYTMP42]], 1 +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]]) +; CHECK-NEXT: to label [[B:%.*]] [label %A.target.return] +; CHECK: B: +; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4 +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[C:%.*]] [] +; CHECK: C: +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 
[[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; CHECK-NEXT: to label [[A]] [label %C.target.D] +; CHECK: D: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[RETURN:%.*]] [] +; CHECK: return: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[MYTMP41_MOVED:%.*]], [[D:%.*]] ], [ [[MYTMP42]], [[ENTRY:%.*]] ], [ [[PHI_MOVED:%.*]], [[LOOP_EXIT_GUARD:%.*]] ] +; CHECK-NEXT: ret i32 [[PHI]] +; CHECK: A.target.return: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: C.target.D: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_RETURN:%.*]] ], [ [[MYTMP41]], [[C_TARGET_D:%.*]] ] +; CHECK-NEXT: [[PHI_MOVED]] = phi i32 [ [[MYTMP43]], [[A_TARGET_RETURN]] ], [ poison, [[C_TARGET_D]] ] +; CHECK-NEXT: [[GUARD_RETURN:%.*]] = phi i1 [ true, [[A_TARGET_RETURN]] ], [ false, [[C_TARGET_D]] ] +; CHECK-NEXT: br i1 [[GUARD_RETURN]], label [[RETURN]], label [[D]] +; +entry: + %mytmp42 = load i32, ptr %arg1, align 4 + %cmp2 = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp2) to label %A [label %return] + +A: + %mytmp43 = add i32 %mytmp42, 1 + %cmp1 = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %return] + +B: + %mytmp41 = load i32, ptr %arg2, align 4 + callbr void asm "", ""() to label %C [] + +C: + %cmp = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %D] + +D: + callbr void asm "", ""() to label %return [] + +return: + %phi = phi i32 [ %mytmp41, %D ], [ %mytmp43, %A ], [%mytmp42, %entry] + ret i32 %phi +} + ; Loop consists of A, B and C: ; - A is the header ; - A and C are exiting blocks @@ -236,3 +409,66 @@ return: %phi = phi i32 [ %mytmp41, %D ], [ %mytmp42, %E ] ret i32 %phi } + +define i32 @phi-via-external-block_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 { +; CHECK-LABEL: @phi-via-external-block_callbr( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4 +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[A:%.*]] [] +; CHECK: A: +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]]) +; CHECK-NEXT: to label [[B:%.*]] [label %A.target.E] +; CHECK: B: +; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4 +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[C:%.*]] [] +; CHECK: C: +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MYTMP42]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]]) +; CHECK-NEXT: to label [[A]] [label %C.target.D] +; CHECK: D: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[RETURN:%.*]] [] +; CHECK: E: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label [[RETURN]] [] +; CHECK: return: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[MYTMP41_MOVED:%.*]], [[D:%.*]] ], [ [[MYTMP42]], [[E:%.*]] ] +; CHECK-NEXT: ret i32 [[PHI]] +; CHECK: A.target.E: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]] +; CHECK: C.target.D: +; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]] +; CHECK: loop.exit.guard: +; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_E:%.*]] ], [ [[MYTMP41]], [[C_TARGET_D:%.*]] ] +; CHECK-NEXT: [[GUARD_E:%.*]] = phi i1 [ true, [[A_TARGET_E]] ], [ false, [[C_TARGET_D]] ] +; CHECK-NEXT: br i1 [[GUARD_E]], label [[E]], label [[D]] +; +entry: + %mytmp42 = load i32, ptr %arg1, align 4 + callbr void asm "", ""() to label %A [] + +A: + %cmp1 = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp1) 
to label %B [label %E] + +B: + %mytmp41 = load i32, ptr %arg2, align 4 + callbr void asm "", ""() to label %C [] + +C: + %cmp = icmp slt i32 %mytmp42, 0 + callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %D] + +D: + callbr void asm "", ""() to label %return [] + +E: + callbr void asm "", ""() to label %return [] + +return: + %phi = phi i32 [ %mytmp41, %D ], [ %mytmp42, %E ] + ret i32 %phi +} diff --git a/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll b/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll index 05f50fc..e65e254 100644 --- a/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll +++ b/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll @@ -56,3 +56,71 @@ mbb5291: ; preds = %mbb4321 store volatile [2 x i32] %i5293, ptr addrspace(5) null, align 4 ret void } + +define fastcc void @undef_phi_callbr(i64 %i5247, i1 %i4530, i1 %i4936.not) { +; CHECK-LABEL: define fastcc void @undef_phi_callbr( +; CHECK-SAME: i64 [[I5247:%.*]], i1 [[I4530:%.*]], i1 [[I4936_NOT:%.*]]) { +; CHECK-NEXT: [[MBB:.*:]] +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label %[[MBB3932:.*]] [] +; CHECK: [[MBB3932]]: +; CHECK-NEXT: callbr void asm "", ""() +; CHECK-NEXT: to label %[[MBB4454:.*]] [] +; CHECK: [[MBB4321:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[I5247]] to i32 +; CHECK-NEXT: [[I5290:%.*]] = icmp eq i32 [[TMP0]], 0 +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[I5290]]) +; CHECK-NEXT: to label %[[MBB3932]] [label %mbb4321.target.mbb5291] +; CHECK: [[MBB4454]]: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[I4530]]) +; CHECK-NEXT: to label %[[MBB4535:.*]] [label %mbb4454.target.mbb4454.target.mbb4531] +; CHECK: [[MBB4531:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[MBB4535]]: +; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[I4936_NOT]]) +; CHECK-NEXT: to label %[[MBB4535_TARGET_MBB4321:.*]] [label %mbb4454] +; CHECK: [[MBB5291:.*]]: +; CHECK-NEXT: [[I5293:%.*]] = insertvalue [2 x i32] zeroinitializer, i32 [[DOTMOVED:%.*]], 1 +; CHECK-NEXT: store volatile [2 x i32] [[I5293]], ptr addrspace(5) null, align 4 +; CHECK-NEXT: ret void +; CHECK: [[MBB4454_TARGET_MBB4531:.*]]: +; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD:.*]] +; CHECK: [[MBB4321_TARGET_MBB5291:.*]]: +; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD]] +; CHECK: [[LOOP_EXIT_GUARD]]: +; CHECK-NEXT: [[DOTMOVED]] = phi i32 [ poison, %[[MBB4454_TARGET_MBB4531]] ], [ [[TMP0]], %[[MBB4321_TARGET_MBB5291]] ] +; CHECK-NEXT: [[GUARD_MBB4531:%.*]] = phi i1 [ true, %[[MBB4454_TARGET_MBB4531]] ], [ false, %[[MBB4321_TARGET_MBB5291]] ] +; CHECK-NEXT: br i1 [[GUARD_MBB4531]], label %[[MBB4531]], label %[[MBB5291]] +; CHECK: [[MBB4454_TARGET_MBB4454_TARGET_MBB4531:.*]]: +; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD1:.*]] +; CHECK: [[MBB4535_TARGET_MBB4321]]: +; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD1]] +; CHECK: [[LOOP_EXIT_GUARD1]]: +; CHECK-NEXT: [[GUARD_MBB4454_TARGET_MBB4531:%.*]] = phi i1 [ true, %[[MBB4454_TARGET_MBB4454_TARGET_MBB4531]] ], [ false, %[[MBB4535_TARGET_MBB4321]] ] +; CHECK-NEXT: br i1 [[GUARD_MBB4454_TARGET_MBB4531]], label %[[MBB4454_TARGET_MBB4531]], label %[[MBB4321]] +; +mbb: + callbr void asm "", ""() to label %mbb3932 [] + +mbb3932: ; preds = %mbb4321, %mbb + callbr void asm "", ""() to label %mbb4454 [] + +mbb4321: ; preds = %mbb4535 + %0 = trunc i64 %i5247 to i32 + %i5290 = icmp eq i32 %0, 0 + callbr void asm "", "r,!i"(i1 %i5290) to label %mbb3932 [label %mbb5291] + +mbb4454: ; preds = %mbb4535, %mbb3932 + callbr void asm "", "r,!i"(i1 %i4530) to label %mbb4535 [label %mbb4531] + +mbb4531: ; preds = %mbb4454 + ret void + 
+mbb4535: ; preds = %mbb4454 + callbr void asm "", "r,!i"(i1 %i4936.not) to label %mbb4321 [label %mbb4454] + +mbb5291: ; preds = %mbb4321 + %i5293 = insertvalue [2 x i32] zeroinitializer, i32 %0, 1 + store volatile [2 x i32] %i5293, ptr addrspace(5) null, align 4 + ret void +} diff --git a/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll b/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll index d9f6aed..faf4bec 100644 --- a/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll @@ -6,13 +6,11 @@ ; Check we can use ssa.copy with unnamed types. ; CHECK-LABEL: bb: -; CHECK: Has predicate info ; CHECK: branch predicate info { TrueEdge: 1 Comparison: %cmp1 = icmp ne ptr %arg, null Edge: [label %bb,label %bb1], RenamedOp: %arg } ; CHECK-NEXT: %arg.0 = bitcast ptr %arg to ptr ; CHECK-LABEL: bb1: -; CHECK: Has predicate info -; CHECK-NEXT: branch predicate info { TrueEdge: 0 Comparison: %cmp2 = icmp ne ptr null, %tmp Edge: [label %bb1,label %bb3], RenamedOp: %tmp } +; CHECK: branch predicate info { TrueEdge: 0 Comparison: %cmp2 = icmp ne ptr null, %tmp Edge: [label %bb1,label %bb3], RenamedOp: %tmp } ; CHECK-NEXT: %tmp.0 = bitcast ptr %tmp to ptr define void @f0(ptr %arg, ptr %tmp) { diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py index 781240a..11a5a57 100644 --- a/llvm/test/lit.cfg.py +++ b/llvm/test/lit.cfg.py @@ -753,10 +753,17 @@ if not hasattr(sys, "getwindowsversion") or sys.getwindowsversion().build >= 170 config.available_features.add("unix-sockets") # .debug_frame is not emitted for targeting Windows x64, aarch64/arm64, AIX, or Apple Silicon Mac. -if not re.match( - r"^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-cygnus|windows-gnu|windows-msvc|aix)", - config.target_triple, -) and not re.match(r"^arm64(e)?-apple-(macos|darwin)", config.target_triple): +if ( + not re.match( + r"^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-cygnus|windows-gnu|windows-msvc|aix)", + config.target_triple, + ) + and not re.match( + r"^arm64(e)?-apple-(macos|darwin)", + config.target_triple, + ) + and not re.match(r".*-zos.*", config.target_triple) +): config.available_features.add("debug_frame") if config.enable_backtrace: diff --git a/llvm/test/tools/dxil-dis/di-subprogram.ll b/llvm/test/tools/dxil-dis/di-subprogram.ll index 8255d39..912421f 100644 --- a/llvm/test/tools/dxil-dis/di-subprogram.ll +++ b/llvm/test/tools/dxil-dis/di-subprogram.ll @@ -3,8 +3,6 @@ target triple = "dxil-unknown-shadermodel6.7-library" !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!3, !4} -!llvm.used = !{!5} -!llvm.lines = !{!13, !14, !15, !16} ; CHECK: !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Some Compiler", isOptimized: true, runtimeVersion: 0, emissionKind: 1, enums: !2) !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Some Compiler", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, splitDebugInlining: false, nameTableKind: None) @@ -16,38 +14,3 @@ target triple = "dxil-unknown-shadermodel6.7-library" !3 = !{i32 2, !"Dwarf Version", i32 4} ; CHECK: !4 = !{i32 2, !"Debug Info Version", i32 3} !4 = !{i32 2, !"Debug Info Version", i32 3} - -; CHECK: !5 = distinct !DISubprogram(name: "fma", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, function: !0, variables: !9) -!5 = distinct !DISubprogram(name: "fma", scope: !1, file: !1, line: 1, type: !6, scopeLine: 1, flags: 
DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !9)
-
-; CHECK: !6 = !DISubroutineType(types: !7)
-!6 = !DISubroutineType(types: !7)
-
-; CHECK: !7 = !{!8, !8, !8, !8}
-!7 = !{!8, !8, !8, !8}
-
-; CHECK: !8 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float)
-!8 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float)
-
-; CHECK: !9 = !{!10, !11, !12}
-!9 = !{!10, !11, !12}
-
-; CHECK: !10 = !DILocalVariable(tag: DW_TAG_variable, name: "x", arg: 1, scope: !5, file: !1, line: 1, type: !8)
-!10 = !DILocalVariable(name: "x", arg: 1, scope: !5, file: !1, line: 1, type: !8)
-
-; CHECK: !11 = !DILocalVariable(tag: DW_TAG_variable, name: "y", arg: 2, scope: !5, file: !1, line: 1, type: !8)
-!11 = !DILocalVariable(name: "y", arg: 2, scope: !5, file: !1, line: 1, type: !8)
-
-; CHECK: !12 = !DILocalVariable(tag: DW_TAG_variable, name: "z", arg: 3, scope: !5, file: !1, line: 1, type: !8)
-!12 = !DILocalVariable(name: "z", arg: 3, scope: !5, file: !1, line: 1, type: !8)
-
-
-; CHECK: !13 = !DILocation(line: 0, scope: !5)
-; CHECK: !14 = !DILocation(line: 2, column: 12, scope: !5)
-; CHECK: !15 = !DILocation(line: 2, column: 16, scope: !5)
-; CHECK: !16 = !DILocation(line: 2, column: 3, scope: !5)
-
-!13 = !DILocation(line: 0, scope: !5)
-!14 = !DILocation(line: 2, column: 12, scope: !5)
-!15 = !DILocation(line: 2, column: 16, scope: !5)
-!16 = !DILocation(line: 2, column: 3, scope: !5)
diff --git a/llvm/test/tools/dxil-dis/di-subrotine.ll b/llvm/test/tools/dxil-dis/di-subrotine.ll
deleted file mode 100644
index 285e319..0000000
--- a/llvm/test/tools/dxil-dis/di-subrotine.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s
-target triple = "dxil-unknown-shadermodel6.7-library"
-
-!llvm.used = !{!0}
-
-!0 = !DISubroutineType(types: !1)
-!1 = !{!2, !2, !2, !2}
-!2 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float)
-
-; CHECK: !0 = !DISubroutineType(types: !1)
-; CHECK: !1 = !{!2, !2, !2, !2}
-; CHECK: !2 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float)
diff --git a/llvm/test/tools/dxil-dis/md-manystrings.ll b/llvm/test/tools/dxil-dis/md-manystrings.ll
index 938e2dd..a7dd595 100644
--- a/llvm/test/tools/dxil-dis/md-manystrings.ll
+++ b/llvm/test/tools/dxil-dis/md-manystrings.ll
@@ -4,7 +4,7 @@
 target triple = "dxil-unknown-shadermodel6.7-library"
-!llvm.too_many_strings = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31}
+!llvm.ident = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31}
 !0 = !{!"String 0"}
 !1 = !{!"String 1"}
@@ -39,7 +39,7 @@ target triple = "dxil-unknown-shadermodel6.7-library"
 !30 = !{!"String 30"}
 !31 = !{!"String 31"}
-; CHECK: !llvm.too_many_strings = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31}
+; CHECK: !llvm.ident = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31}
 ; CHECK: !0 = !{!"String 0"}
 ; CHECK: !1 = !{!"String 1"}
 ; CHECK: !2 = !{!"String 2"}
