Diffstat (limited to 'llvm/test/CodeGen')
 llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir | 278
 llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir | 16
 llvm/test/CodeGen/AArch64/combine-sdiv.ll | 85
 llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir | 227
 llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll | 13
 llvm/test/CodeGen/AArch64/sme-agnostic-za.ll | 4
 llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir | 12
 llvm/test/CodeGen/AArch64/sme-za-exceptions.ll | 242
 llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll | 12
 llvm/test/CodeGen/LoongArch/lasx/vselect.ll | 125
 llvm/test/CodeGen/LoongArch/lsx/vselect.ll | 137
 llvm/test/CodeGen/PowerPC/fmf-propagation.ll | 90
 llvm/test/CodeGen/PowerPC/lxvkq-vec-constant.ll | 307
 llvm/test/CodeGen/PowerPC/vector-reduce-add.ll | 17
 llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll | 63
 llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll | 72
 llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll | 104
 llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll | 64
 llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll | 92
 llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll | 76
 llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.mir | 57
 llvm/test/CodeGen/SPIRV/FCmpFalse.ll | 10
 llvm/test/CodeGen/SPIRV/FCmpFalse_Vec.ll | 13
 llvm/test/CodeGen/SPIRV/builtin_duplicate.ll | 20
 llvm/test/CodeGen/SPIRV/complex-constexpr.ll | 21
 llvm/test/CodeGen/SPIRV/dominator-order.ll | 25
 llvm/test/CodeGen/SPIRV/llvm-intrinsics/fake_use.ll | 13
 llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchange_cl20.ll | 84
 llvm/test/CodeGen/SystemZ/htm-intrinsics.ll | 4
 llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll | 738
 llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll | 1665
 llvm/test/CodeGen/WebAssembly/bulk-memory.ll | 97
 llvm/test/CodeGen/WebAssembly/bulk-memory64.ll | 91
 llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll | 109
 llvm/test/CodeGen/X86/apx/cf.ll | 17
 llvm/test/CodeGen/X86/cpus-intel.ll | 2
 llvm/test/CodeGen/X86/isel-fpclass.ll | 433
 llvm/test/CodeGen/X86/masked_gather_scatter.ll | 60
 llvm/test/CodeGen/X86/pr160612.ll | 74
 llvm/test/CodeGen/X86/setcc-wide-types.ll | 65
 llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll | 4
 llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll | 52
 42 files changed, 5032 insertions(+), 658 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir
new file mode 100644
index 0000000..824ada1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir
@@ -0,0 +1,278 @@
+# NOTE: Assertions have been autogenerated by utils/update_givaluetracking_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64 -passes="print<gisel-value-tracking>" -filetype=null %s 2>&1 | FileCheck %s
+
+---
+name: Cst
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @Cst
+ ; CHECK-NEXT: %0:_ KnownBits:00000010 SignBits:6
+ ; CHECK-NEXT: %1:_ KnownBits:00011000 SignBits:3
+ ; CHECK-NEXT: %2:_ KnownBits:00011010 SignBits:3
+ %0:_(s8) = G_CONSTANT i8 2
+ %1:_(s8) = G_CONSTANT i8 24
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: CstZero
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstZero
+ ; CHECK-NEXT: %0:_ KnownBits:00000001 SignBits:7
+ ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:00000000 SignBits:8
+ %0:_(s8) = G_CONSTANT i8 1
+ %1:_(s8) = G_CONSTANT i8 255
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: CstNegOne
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstNegOne
+ ; CHECK-NEXT: %0:_ KnownBits:00000000 SignBits:8
+ ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:11111111 SignBits:8
+ %0:_(s8) = G_CONSTANT i8 0
+ %1:_(s8) = G_CONSTANT i8 255
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: CstSeven
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstSeven
+ ; CHECK-NEXT: %0:_ KnownBits:00001000 SignBits:4
+ ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:00000111 SignBits:5
+ %0:_(s8) = G_CONSTANT i8 8
+ %1:_(s8) = G_CONSTANT i8 255
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: CstNeg
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @CstNeg
+ ; CHECK-NEXT: %0:_ KnownBits:11100000 SignBits:3
+ ; CHECK-NEXT: %1:_ KnownBits:00000010 SignBits:6
+ ; CHECK-NEXT: %2:_ KnownBits:11100010 SignBits:3
+ %0:_(s8) = G_CONSTANT i8 224
+ %1:_(s8) = G_CONSTANT i8 2
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: ScalarVar
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarVar
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = COPY $b1
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: ScalarRhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarRhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6
+ ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 3
+ %2:_(s8) = G_ADD %0, %1
+...
+---
+name: ScalarNonNegative
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarNonNegative
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4
+ ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4
+ ; CHECK-NEXT: %3:_ KnownBits:11111111 SignBits:8
+ ; CHECK-NEXT: %4:_ KnownBits:???????? SignBits:4
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 15
+ %2:_(s8) = G_AND %0, %1
+ %3:_(s8) = G_CONSTANT i8 255
+ %4:_(s8) = G_ADD %2, %3
+...
+---
+name: ScalarLhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarLhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6
+ ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 3
+ %2:_(s8) = G_ADD %1, %0
+...
+---
+name: ScalarPartKnown
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @ScalarPartKnown
+ ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4
+ ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4
+ ; CHECK-NEXT: %3:_ KnownBits:00000101 SignBits:5
+ ; CHECK-NEXT: %4:_ KnownBits:000????? SignBits:3
+ %0:_(s8) = COPY $b0
+ %1:_(s8) = G_CONSTANT i8 15
+ %2:_(s8) = G_AND %0, %1
+ %3:_(s8) = G_CONSTANT i8 5
+ %4:_(s8) = G_ADD %2, %3
+...
+---
+name: VectorCstZero
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCstZero
+ ; CHECK-NEXT: %0:_ KnownBits:0000000000000001 SignBits:15
+ ; CHECK-NEXT: %1:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000001 SignBits:15
+ ; CHECK-NEXT: %3:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %4:_ KnownBits:0000000000000000 SignBits:16
+ %0:_(s16) = G_CONSTANT i16 1
+ %1:_(s16) = G_CONSTANT i16 65535
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %4:_(<4 x s16>) = G_ADD %2, %3
+...
+---
+name: VectorCstNegOne
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCstNegOne
+ ; CHECK-NEXT: %0:_ KnownBits:0000000000000000 SignBits:16
+ ; CHECK-NEXT: %1:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000000 SignBits:16
+ ; CHECK-NEXT: %3:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %4:_ KnownBits:1111111111111111 SignBits:16
+ %0:_(s16) = G_CONSTANT i16 0
+ %1:_(s16) = G_CONSTANT i16 65535
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %4:_(<4 x s16>) = G_ADD %2, %3
+...
+---
+name: VectorVar
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorVar
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %2:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(<4 x s16>) = COPY $d1
+ %2:_(<4 x s16>) = G_ADD %0, %1
+...
+---
+name: VectorRhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorRhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 3
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_ADD %2, %0
+...
+---
+name: VectorNonNegative
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorNonNegative
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8
+ ; CHECK-NEXT: %4:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %5:_ KnownBits:1111111111111111 SignBits:16
+ ; CHECK-NEXT: %6:_ KnownBits:???????????????? SignBits:8
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 255
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_AND %0, %2
+ %4:_(s16) = G_CONSTANT i16 65535
+ %5:_(<4 x s16>) = G_BUILD_VECTOR %4, %4, %4, %4
+ %6:_(<4 x s16>) = G_ADD %3, %5
+...
+---
+name: VectorLhsEarlyOut
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorLhsEarlyOut
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 3
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_ADD %0, %2
+...
+---
+name: VectorPartKnown
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorPartKnown
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8
+ ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8
+ ; CHECK-NEXT: %4:_ KnownBits:0000000000101010 SignBits:10
+ ; CHECK-NEXT: %5:_ KnownBits:0000000001001010 SignBits:9
+ ; CHECK-NEXT: %6:_ KnownBits:000000000??01010 SignBits:9
+ ; CHECK-NEXT: %7:_ KnownBits:0000000????????? SignBits:7
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = G_CONSTANT i16 255
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1
+ %3:_(<4 x s16>) = G_AND %0, %2
+ %4:_(s16) = G_CONSTANT i16 42
+ %5:_(s16) = G_CONSTANT i16 74
+ %6:_(<4 x s16>) = G_BUILD_VECTOR %4, %5, %5, %4
+ %7:_(<4 x s16>) = G_ADD %6, %3
+...
+---
+name: VectorCst36
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCst36
+ ; CHECK-NEXT: %0:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %1:_ KnownBits:0000000000000110 SignBits:13
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000?1? SignBits:13
+ ; CHECK-NEXT: %3:_ KnownBits:0000000000000?1? SignBits:13
+ ; CHECK-NEXT: %4:_ KnownBits:000000000000???? SignBits:12
+ %0:_(s16) = G_CONSTANT i16 3
+ %1:_(s16) = G_CONSTANT i16 6
+ %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0
+ %4:_(<4 x s16>) = G_ADD %2, %3
+...
+
+---
+name: VectorCst3unknown
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: @VectorCst3unknown
+ ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14
+ ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1
+ ; CHECK-NEXT: %4:_ KnownBits:???????????????? SignBits:1
+ %0:_(<4 x s16>) = COPY $d0
+ %1:_(s16) = COPY $h0
+ %2:_(s16) = G_CONSTANT i16 3
+ %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %2, %2, %1
+ %4:_(<4 x s16>) = G_ADD %0, %3
+...
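
The tests above pin down GlobalISel's known-bits propagation through G_ADD. For intuition, here is a brute-force model of the facts being checked, written as a hedged C++ sketch (LLVM's real implementation propagates carries analytically instead of enumerating values): add every value consistent with each operand's known bits, and keep only the bits that agree across all sums.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Known { uint8_t Zero, One; }; // bit set => that bit is known 0 / known 1

// All 8-bit values consistent with the known bits.
static std::vector<uint8_t> candidates(Known K) {
  std::vector<uint8_t> Vals;
  for (unsigned V = 0; V < 256; ++V)
    if ((V & K.Zero) == 0 && (V & K.One) == K.One)
      Vals.push_back(static_cast<uint8_t>(V));
  return Vals;
}

// Known bits of A + B: intersect the bit patterns of every possible sum.
static Known addKnown(Known A, Known B) {
  uint8_t CommonOne = 0xFF, CommonZero = 0xFF;
  for (uint8_t X : candidates(A))
    for (uint8_t Y : candidates(B)) {
      uint8_t S = static_cast<uint8_t>(X + Y);
      CommonOne &= S;                         // bit was 1 in every sum
      CommonZero &= static_cast<uint8_t>(~S); // bit was 0 in every sum
    }
  return {CommonZero, CommonOne};
}

int main() {
  // ScalarPartKnown above: %2 = 0000????, %3 = 00000101 -> %4 = 000?????
  Known A{0xF0, 0x00};                        // high nibble known zero
  Known B{static_cast<uint8_t>(~0x05), 0x05}; // the constant 5
  Known R = addKnown(A, B);
  std::printf("Zero=%02x One=%02x\n", R.Zero, R.One); // prints Zero=e0 One=00
}
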
diff --git a/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir b/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir
new file mode 100644
index 0000000..6540160
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir
@@ -0,0 +1,16 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=aarch64 -mattr=+sme -run-pass=aarch64-post-coalescer-pass -o - %s | FileCheck %s
+
+---
+name: foo
+machineFunctionInfo:
+ hasStreamingModeChanges: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: foo
+ ; CHECK: $d0 = COPY undef %0:fpr64
+ ; CHECK-NEXT: FAKE_USE implicit $d0
+ %1:fpr64 = COALESCER_BARRIER_FPR64 undef %1
+ $d0 = COPY %1
+ FAKE_USE implicit $d0
+...
diff --git a/llvm/test/CodeGen/AArch64/combine-sdiv.ll b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
index dc88f94..cca190f 100644
--- a/llvm/test/CodeGen/AArch64/combine-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
@@ -1774,3 +1774,88 @@ define i128 @combine_i128_sdiv_const100(i128 %x) {
%1 = sdiv i128 %x, 100
ret i128 %1
}
+
+; The following only becomes an sdiv_by_one after type legalisation, after which
+; the splatted scalar constant has a different type from the splat vector's
+; element type. This test verifies that DAGCombiner tolerates this type difference.
+define <16 x i16> @combine_vec_sdiv_by_one_obfuscated(<16 x i16> %x) "target-features"="+sve" {
+; CHECK-SD-LABEL: combine_vec_sdiv_by_one_obfuscated:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: combine_vec_sdiv_by_one_obfuscated:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-NEXT: movi v3.8h, #1
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: mov v3.h[0], v2.h[0]
+; CHECK-GI-NEXT: smov w9, v3.h[0]
+; CHECK-GI-NEXT: smov w16, v3.h[7]
+; CHECK-GI-NEXT: sdiv w14, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[1]
+; CHECK-GI-NEXT: smov w9, v3.h[1]
+; CHECK-GI-NEXT: sdiv w15, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v3.h[2]
+; CHECK-GI-NEXT: sdiv w13, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[3]
+; CHECK-GI-NEXT: smov w9, v3.h[3]
+; CHECK-GI-NEXT: sdiv w12, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[4]
+; CHECK-GI-NEXT: smov w9, v3.h[4]
+; CHECK-GI-NEXT: sdiv w11, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[5]
+; CHECK-GI-NEXT: smov w9, v3.h[5]
+; CHECK-GI-NEXT: sdiv w10, w8, w9
+; CHECK-GI-NEXT: smov w8, v0.h[6]
+; CHECK-GI-NEXT: smov w9, v3.h[6]
+; CHECK-GI-NEXT: movi v3.8h, #1
+; CHECK-GI-NEXT: smov w17, v3.h[0]
+; CHECK-GI-NEXT: smov w18, v3.h[1]
+; CHECK-GI-NEXT: smov w0, v3.h[2]
+; CHECK-GI-NEXT: smov w1, v3.h[3]
+; CHECK-GI-NEXT: smov w2, v3.h[4]
+; CHECK-GI-NEXT: smov w3, v3.h[5]
+; CHECK-GI-NEXT: sdiv w8, w8, w9
+; CHECK-GI-NEXT: smov w9, v0.h[7]
+; CHECK-GI-NEXT: fmov s0, w14
+; CHECK-GI-NEXT: mov v0.h[1], w15
+; CHECK-GI-NEXT: smov w15, v1.h[6]
+; CHECK-GI-NEXT: mov v0.h[2], w13
+; CHECK-GI-NEXT: sdiv w9, w9, w16
+; CHECK-GI-NEXT: smov w16, v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], w12
+; CHECK-GI-NEXT: smov w12, v1.h[7]
+; CHECK-GI-NEXT: mov v0.h[4], w11
+; CHECK-GI-NEXT: sdiv w16, w16, w17
+; CHECK-GI-NEXT: smov w17, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[5], w10
+; CHECK-GI-NEXT: mov v0.h[6], w8
+; CHECK-GI-NEXT: sdiv w17, w17, w18
+; CHECK-GI-NEXT: smov w18, v1.h[2]
+; CHECK-GI-NEXT: fmov s2, w16
+; CHECK-GI-NEXT: smov w16, v3.h[6]
+; CHECK-GI-NEXT: mov v0.h[7], w9
+; CHECK-GI-NEXT: sdiv w18, w18, w0
+; CHECK-GI-NEXT: smov w0, v1.h[3]
+; CHECK-GI-NEXT: mov v2.h[1], w17
+; CHECK-GI-NEXT: sdiv w0, w0, w1
+; CHECK-GI-NEXT: smov w1, v1.h[4]
+; CHECK-GI-NEXT: mov v2.h[2], w18
+; CHECK-GI-NEXT: sdiv w1, w1, w2
+; CHECK-GI-NEXT: smov w2, v1.h[5]
+; CHECK-GI-NEXT: mov v2.h[3], w0
+; CHECK-GI-NEXT: sdiv w14, w2, w3
+; CHECK-GI-NEXT: mov v2.h[4], w1
+; CHECK-GI-NEXT: sdiv w13, w15, w16
+; CHECK-GI-NEXT: smov w15, v3.h[7]
+; CHECK-GI-NEXT: mov v2.h[5], w14
+; CHECK-GI-NEXT: sdiv w10, w12, w15
+; CHECK-GI-NEXT: mov v2.h[6], w13
+; CHECK-GI-NEXT: mov v2.h[7], w10
+; CHECK-GI-NEXT: mov v1.16b, v2.16b
+; CHECK-GI-NEXT: ret
+ %zero_and_ones = shufflevector <16 x i16> zeroinitializer, <16 x i16> splat (i16 1), <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %div = sdiv <16 x i16> %x, %zero_and_ones
+ ret <16 x i16> %div
+}
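
A value-based reading of the comment above, as a hedged C++ sketch (the helper is hypothetical and named only for illustration; it is not DAGCombiner's actual code): the fold remains valid as long as every divisor lane equals 1 when truncated to the element width, even if type legalisation left the scalar constants wider than the vector elements.

#include <cstdint>
#include <vector>

// Compare lane *values* at the element width rather than requiring the
// scalar constant type to equal the vector element type.
static bool isSplatOfOne(const std::vector<uint64_t> &Lanes, unsigned EltBits) {
  const uint64_t Mask = EltBits >= 64 ? ~0ULL : ((1ULL << EltBits) - 1);
  for (uint64_t L : Lanes)
    if ((L & Mask) != 1)
      return false;
  return true; // sdiv x, splat(1) can then fold to x
}
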
diff --git a/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir b/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir
new file mode 100644
index 0000000..3f174a6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir
@@ -0,0 +1,227 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=aarch64 -mattr=+sve -mattr=+sme -run-pass=aarch64-machine-sme-abi -verify-machineinstrs %s -o - | FileCheck %s
+
+--- |
+ ; Test moving a state change to be before a $nzcv def
+ define void @move_before_nzcv_def() "aarch64_inout_za" { ret void }
+
+ ; Test moving a state change to a point where $x0 is live
+ define void @move_to_x0_live() "aarch64_inout_za" { ret void }
+
+ ; Test we don't move before a previous state change.
+ define void @do_not_move_before_prior_state_change() "aarch64_za_state_agnostic" { ret void }
+
+ ; Test we don't move into a call sequence.
+ define void @do_not_move_into_call() "aarch64_inout_za" { ret void }
+
+ declare void @clobber()
+ declare void @inout_call() "aarch64_inout_za"
+...
+---
+name: move_before_nzcv_def
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ bb.0:
+
+ ; CHECK-LABEL: name: move_before_nzcv_def
+ ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]]
+ ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]]
+ ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]]
+ ; CHECK-NEXT: MSR 56965, [[COPY1]]
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
+ ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: RestoreZAPseudo [[MRS]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
+ ; CHECK-NEXT: MSR 56965, $xzr
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: RET_ReallyLR
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ $nzcv = IMPLICIT_DEF
+ $zab0 = IMPLICIT_DEF
+ FAKE_USE $nzcv
+
+ RET_ReallyLR
+...
+---
+name: move_to_x0_live
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ bb.0:
+
+ ; CHECK-LABEL: name: move_to_x0_live
+ ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]]
+ ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]]
+ ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]]
+ ; CHECK-NEXT: MSR 56965, [[COPY1]]
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: $x0 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
+ ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: RestoreZAPseudo [[MRS]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
+ ; CHECK-NEXT: MSR 56965, $xzr
+ ; CHECK-NEXT: $x0 = COPY [[COPY2]]
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $x0
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: RET_ReallyLR
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ $x0 = IMPLICIT_DEF
+
+ $nzcv = IMPLICIT_DEF
+ FAKE_USE $x0
+
+ $zab0 = IMPLICIT_DEF
+ FAKE_USE $nzcv
+
+ RET_ReallyLR
+...
+---
+name: do_not_move_before_prior_state_change
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ ; CHECK-LABEL: name: do_not_move_before_prior_state_change
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: BL &__arm_sme_state_size, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit-def $x0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: $sp = SUBXrx64 $sp, [[COPY]], 24
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[COPY1]]
+ ; CHECK-NEXT: BL &__arm_sme_save, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0
+ ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv
+ ; CHECK-NEXT: Bcc 2, %bb.1, implicit $nzcv
+ ; CHECK-NEXT: B %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: liveins: $nzcv
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: $x0 = COPY [[COPY1]]
+ ; CHECK-NEXT: BL &__arm_sme_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0
+ ; CHECK-NEXT: RET_ReallyLR
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: $x0 = COPY [[COPY1]]
+ ; CHECK-NEXT: BL &__arm_sme_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0
+ ; CHECK-NEXT: RET_ReallyLR
+ bb.0:
+ successors: %bb.1, %bb.2
+
+  ; The insertion point cannot move before the $nzcv def (as that would require
+  ; moving before the $zab0 def, which requires the ACTIVE state).
+ $nzcv = IMPLICIT_DEF
+ $zab0 = IMPLICIT_DEF
+ Bcc 2, %bb.1, implicit $nzcv
+ B %bb.2
+ ; bb.1 and bb.2 both require ZA saved on entry (to force bb.0's exit bundle to
+ ; pick the LOCAL_SAVED state).
+ bb.1:
+ liveins: $nzcv
+ FAKE_USE $nzcv
+
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ RET_ReallyLR
+ bb.2:
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ RET_ReallyLR
+...
+---
+name: do_not_move_into_call
+tracksRegLiveness: true
+isSSA: true
+noVRegs: false
+body: |
+ bb.0:
+
+ ; CHECK-LABEL: name: do_not_move_into_call
+ ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp
+ ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]]
+ ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]]
+ ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]]
+ ; CHECK-NEXT: MSR 56965, [[COPY1]]
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: RequiresZASavePseudo
+ ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
+ ; CHECK-NEXT: [[MRS1:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
+ ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: RestoreZAPseudo [[MRS1]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
+ ; CHECK-NEXT: MSR 56965, $xzr
+ ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv
+ ; CHECK-NEXT: $zab0 = IMPLICIT_DEF
+ ; CHECK-NEXT: FAKE_USE $nzcv
+ ; CHECK-NEXT: RET_ReallyLR
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ RequiresZASavePseudo
+ BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+
+  ; This is an artificial test where NZCV is def'd inside a call sequence, so
+  ; we can't move the insert point before its definition.
+ $nzcv = IMPLICIT_DEF
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+ $zab0 = IMPLICIT_DEF
+ FAKE_USE $nzcv
+
+ RET_ReallyLR
+...
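
Taken together, the four tests in this file outline the pass's insertion-point search. A simplified C++ model, under the assumption that the field and function names below are illustrative rather than the pass's real data structures: the pass prefers a point where $nzcv is dead, so it may hoist the state change above an $nzcv def, but it must not cross a prior ZA state change or land inside a call sequence.

#include <cstddef>
#include <vector>

struct Inst {
  bool NZCVDeadHere = false;   // $nzcv is not live just before this instruction
  bool IsStateChange = false;  // a prior ZA state transition
  bool InCallSequence = false; // between ADJCALLSTACKDOWN/ADJCALLSTACKUP
};

// Returns the index to insert before; falls back to `Use` (which means
// saving and restoring $nzcv via mrs/msr around the inserted code) when no
// better point is reachable.
static size_t findInsertPt(const std::vector<Inst> &Block, size_t Use) {
  for (size_t Pt = Use; Pt-- > 0;) {
    if (Block[Pt].IsStateChange || Block[Pt].InCallSequence)
      break;                // hoisting past either of these is not allowed
    if (Block[Pt].NZCVDeadHere)
      return Pt;            // found a point where $nzcv needs no save
  }
  return Use;
}
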
diff --git a/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll b/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll
new file mode 100644
index 0000000..8f1fe5c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=aarch64 -mattr=+sme -stop-after=aarch64-isel < %s | FileCheck %s
+
+target triple = "aarch64"
+
+declare void @foo() "aarch64_pstate_sm_enabled"
+
+define dso_local void @bar() local_unnamed_addr {
+; CHECK-LABEL: name: bar
+; CHECK: hasStreamingModeChanges: true
+entry:
+ tail call void @foo() "aarch64_pstate_sm_enabled"
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll
index e3007a3..e4f9efa 100644
--- a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll
+++ b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll
@@ -391,11 +391,9 @@ define void @agnostic_za_buffer_alloc_with_stack_probes() nounwind "aarch64_za_s
; CHECK-NEWLOWERING-NEXT: sub x19, x8, x0
; CHECK-NEWLOWERING-NEXT: .LBB7_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEWLOWERING-NEXT: sub sp, sp, #16, lsl #12 // =65536
-; CHECK-NEWLOWERING-NEXT: cmp sp, x19
; CHECK-NEWLOWERING-NEXT: mov x0, x19
-; CHECK-NEWLOWERING-NEXT: mrs x8, NZCV
; CHECK-NEWLOWERING-NEXT: bl __arm_sme_save
-; CHECK-NEWLOWERING-NEXT: msr NZCV, x8
+; CHECK-NEWLOWERING-NEXT: cmp sp, x19
; CHECK-NEWLOWERING-NEXT: b.le .LBB7_3
; CHECK-NEWLOWERING-NEXT: // %bb.2: // in Loop: Header=BB7_1 Depth=1
; CHECK-NEWLOWERING-NEXT: mov x0, x19
diff --git a/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir b/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir
index 18764d5..9f33c06 100644
--- a/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir
+++ b/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir
@@ -62,14 +62,12 @@ body: |
; CHECK-NEXT: RequiresZASavePseudo
; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
- ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 101, 0, implicit-def $nzcv
- ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv
; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv
; CHECK-NEXT: [[MRS1:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv
; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0
; CHECK-NEXT: RestoreZAPseudo [[MRS1]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0
; CHECK-NEXT: MSR 56965, $xzr
- ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv
+ ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 101, 0, implicit-def $nzcv
; CHECK-NEXT: Bcc 11, %bb.2, implicit $nzcv
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
@@ -116,16 +114,14 @@ body: |
# CHECK-ASM-LABEL: cmp_branch
# CHECK-ASM: msr TPIDR2_EL0, x10
# CHECK-ASM-NEXT: bl clobber
-# CHECK-ASM-NEXT: cmp w20, #101
-# CHECK-ASM-NEXT: mrs x8, NZCV
# CHECK-ASM-NEXT: smstart za
-# CHECK-ASM-NEXT: mrs x9, TPIDR2_EL0
+# CHECK-ASM-NEXT: mrs x8, TPIDR2_EL0
# CHECK-ASM-NEXT: sub x0, x29, #16
-# CHECK-ASM-NEXT: cbnz x9, .LBB0_2
+# CHECK-ASM-NEXT: cbnz x8, .LBB0_2
# CHECK-ASM: bl __arm_tpidr2_restore
# CHECK-ASM-NEXT: .LBB0_2:
+# CHECK-ASM-NEXT: cmp w20, #101
# CHECK-ASM-NEXT: msr TPIDR2_EL0, xzr
-# CHECK-ASM-NEXT: msr NZCV, x8
# CHECK-ASM-NEXT: b.lt .LBB0_4
# CHECK-ASM: bl inout_call
# CHECK-ASM-NEXT: .LBB0_4:
diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
index b6dee97e..b8d6c88 100644
--- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
@@ -732,6 +732,247 @@ exit:
ret void
}
+; This example corresponds to:
+;
+; __arm_agnostic("sme_za_state") void try_catch_agnostic_za_invoke()
+; {
+; try {
+; agnostic_za_call();
+; } catch(...) {
+; }
+; }
+;
+; In this example, we preserve all SME state enabled by PSTATE.ZA using
+; `__arm_sme_save` before agnostic_za_call(), because ZA state must be
+; preserved on all normal returns from an agnostic-ZA function. That means we
+; need to make sure ZA state is saved in case agnostic_za_call() throws, and
+; that we restore ZA state after unwinding to the catch block.
+
+define void @try_catch_agnostic_za_invoke() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_agnostic_za_invoke:
+; CHECK: .Lfunc_begin5:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception5
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w19, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: bl __arm_sme_state_size
+; CHECK-NEXT: sub sp, sp, x0
+; CHECK-NEXT: mov x19, sp
+; CHECK-NEXT: .Ltmp15: // EH_LABEL
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_save
+; CHECK-NEXT: bl agnostic_za_call
+; CHECK-NEXT: .Ltmp16: // EH_LABEL
+; CHECK-NEXT: .LBB5_1: // %exit
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_restore
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB5_2: // %catch
+; CHECK-NEXT: .Ltmp17: // EH_LABEL
+; CHECK-NEXT: bl __cxa_begin_catch
+; CHECK-NEXT: bl __cxa_end_catch
+; CHECK-NEXT: b .LBB5_1
+;
+; CHECK-SDAG-LABEL: try_catch_agnostic_za_invoke:
+; CHECK-SDAG: .Lfunc_begin5:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception5
+; CHECK-SDAG-NEXT: // %bb.0: // %entry
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: bl __arm_sme_state_size
+; CHECK-SDAG-NEXT: sub sp, sp, x0
+; CHECK-SDAG-NEXT: mov x19, sp
+; CHECK-SDAG-NEXT: .Ltmp15: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl agnostic_za_call
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: .Ltmp16: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB5_1: // %exit
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB5_2: // %catch
+; CHECK-SDAG-NEXT: .Ltmp17: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: b .LBB5_1
+entry:
+ invoke void @agnostic_za_call()
+ to label %exit unwind label %catch
+
+catch:
+ %eh_info = landingpad { ptr, i32 }
+ catch ptr null
+ %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0
+ tail call ptr @__cxa_begin_catch(ptr %exception_ptr)
+ tail call void @__cxa_end_catch()
+ br label %exit
+
+exit:
+ ret void
+}
+
+; This is the same as `try_catch_agnostic_za_invoke`, but shows that a lazy
+; save would also need to be committed in a shared-ZA function calling an
+; agnostic-ZA function.
+define void @try_catch_inout_za_agnostic_za_callee() "aarch64_inout_za" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_inout_za_agnostic_za_callee:
+; CHECK: .Lfunc_begin6:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception6
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdsvl x8, #1
+; CHECK-NEXT: mov x9, sp
+; CHECK-NEXT: msub x9, x8, x8, x9
+; CHECK-NEXT: mov sp, x9
+; CHECK-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-NEXT: .Ltmp18: // EH_LABEL
+; CHECK-NEXT: sub x8, x29, #16
+; CHECK-NEXT: msr TPIDR2_EL0, x8
+; CHECK-NEXT: bl agnostic_za_call
+; CHECK-NEXT: .Ltmp19: // EH_LABEL
+; CHECK-NEXT: .LBB6_1: // %exit
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: cbnz x8, .LBB6_3
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: bl __arm_tpidr2_restore
+; CHECK-NEXT: .LBB6_3: // %exit
+; CHECK-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB6_4: // %catch
+; CHECK-NEXT: .Ltmp20: // EH_LABEL
+; CHECK-NEXT: bl __cxa_begin_catch
+; CHECK-NEXT: bl __cxa_end_catch
+; CHECK-NEXT: b .LBB6_1
+;
+; CHECK-SDAG-LABEL: try_catch_inout_za_agnostic_za_callee:
+; CHECK-SDAG: .Lfunc_begin6:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception6
+; CHECK-SDAG-NEXT: // %bb.0: // %entry
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: .Ltmp18: // EH_LABEL
+; CHECK-SDAG-NEXT: sub x19, x29, #16
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl agnostic_za_call
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_2
+; CHECK-SDAG-NEXT: // %bb.1: // %entry
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_2: // %entry
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .Ltmp19: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB6_3: // %exit
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB6_4: // %catch
+; CHECK-SDAG-NEXT: .Ltmp20: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_6
+; CHECK-SDAG-NEXT: // %bb.5: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_6: // %catch
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_8
+; CHECK-SDAG-NEXT: // %bb.7: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_8: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB6_10
+; CHECK-SDAG-NEXT: // %bb.9: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB6_10: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: b .LBB6_3
+entry:
+ invoke void @agnostic_za_call()
+ to label %exit unwind label %catch
+
+catch:
+ %eh_info = landingpad { ptr, i32 }
+ catch ptr null
+ %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0
+ tail call ptr @__cxa_begin_catch(ptr %exception_ptr)
+ tail call void @__cxa_end_catch()
+ br label %exit
+
+exit:
+ ret void
+}
+
declare ptr @__cxa_allocate_exception(i64)
declare void @__cxa_throw(ptr, ptr, ptr)
declare ptr @__cxa_begin_catch(ptr)
@@ -742,3 +983,4 @@ declare void @may_throw()
declare void @shared_za_call() "aarch64_inout_za"
declare void @noexcept_shared_za_call() "aarch64_inout_za"
declare void @shared_zt0_call() "aarch64_inout_zt0"
+declare void @agnostic_za_call() "aarch64_za_state_agnostic"
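
A hedged C++ rendering of what the generated code above does (the __arm_sme_* routine names come straight from the CHECK lines; their exact prototypes here are an assumption made for illustration):

#include <cstdint>

extern "C" {
uint64_t __arm_sme_state_size(void);
void __arm_sme_save(void *);
void __arm_sme_restore(void *);
void agnostic_za_call(void);
}

void try_catch_agnostic_za_invoke_sketch() {
  // __builtin_alloca is a compiler builtin; the buffer lives on the stack,
  // mirroring the `sub sp, sp, x0` in the CHECK lines.
  void *Buf = __builtin_alloca(__arm_sme_state_size());
  __arm_sme_save(Buf); // commit all PSTATE.ZA-enabled state before the call
  try {
    agnostic_za_call(); // may throw
  } catch (...) {
    // swallow the exception, as in the original example
  }
  // reached on both the normal path and after unwinding to the catch block
  __arm_sme_restore(Buf);
}

The single restore at %exit is exactly what both tests check: it must execute on the normal return path and after unwinding into the catch block.
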
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll b/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll
index bea0310..70224fc 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll
@@ -94,6 +94,18 @@ define void @main() #0 {
%uav2_2 = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0)
@llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_f32_1_0(
i32 4, i32 0, i32 10, i32 5, ptr null)
+
+ ; RWBuffer<float4> UnboundedArray[] : register(u10, space5)
+; CHECK: - Type: UAVTyped
+; CHECK: Space: 5
+; CHECK: LowerBound: 10
+; CHECK: UpperBound: 4294967295
+; CHECK: Kind: TypedBuffer
+; CHECK: Flags:
+; CHECK: UsedByAtomic64: false
+ ; RWBuffer<float4> Buf = UnboundedArray[100];
+ %uav3 = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 5, i32 10, i32 -1, i32 100, ptr null)
ret void
}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll
index bf31ccb..559cc53 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll
@@ -32,6 +32,40 @@ define void @select_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
ret void
}
+define void @select_v32i8_1(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v32i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI2_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI2_0)
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <32 x i8>, ptr %a0
+ %v1 = load <32 x i8>, ptr %a1
+ %sel = select <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <32 x i8> %v0, <32 x i8> %v1
+ store <32 x i8> %sel, ptr %res
+ ret void
+}
+
+define void @select_v32i8_2(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v32i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <32 x i8>, ptr %a0
+ %v1 = load <32 x i8>, ptr %a1
+ %sel = select <32 x i1> <i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <32 x i8> %v0, <32 x i8> %v1
+ store <32 x i8> %sel, ptr %res
+ ret void
+}
+
define void @select_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: select_v16i16:
; CHECK: # %bb.0:
@@ -49,6 +83,40 @@ define void @select_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
ret void
}
+define void @select_v16i16_1(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v16i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI5_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI5_0)
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <16 x i16>, ptr %a0
+ %v1 = load <16 x i16>, ptr %a1
+ %sel = select <16 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i16> %v0, <16 x i16> %v1
+ store <16 x i16> %sel, ptr %res
+ ret void
+}
+
+define void @select_v16i16_2(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v16i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI6_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI6_0)
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <16 x i16>, ptr %a0
+ %v1 = load <16 x i16>, ptr %a1
+ %sel = select <16 x i1> <i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x i16> %v0, <16 x i16> %v1
+ store <16 x i16> %sel, ptr %res
+ ret void
+}
+
define void @select_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: select_v8i32:
; CHECK: # %bb.0:
@@ -65,19 +133,70 @@ define void @select_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
ret void
}
+define void @select_v8i32_1(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI8_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI8_0)
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <8 x i32>, ptr %a0
+ %v1 = load <8 x i32>, ptr %a1
+ %sel = select <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i32> %v0, <8 x i32> %v1
+ store <8 x i32> %sel, ptr %res
+ ret void
+}
+
+define void @select_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI9_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI9_0)
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <8 x float>, ptr %a0
+ %v1 = load <8 x float>, ptr %a1
+ %sel = select <8 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false>, <8 x float> %v0, <8 x float> %v1
+ store <8 x float> %sel, ptr %res
+ ret void
+}
+
define void @select_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: select_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: xvld $xr0, $a1, 0
; CHECK-NEXT: xvld $xr1, $a2, 0
-; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI4_0)
-; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI4_0)
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI10_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI10_0)
; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
%v0 = load <4 x i64>, ptr %a0
%v1 = load <4 x i64>, ptr %a1
- %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i64> %v0, <4 x i64> %v1
+ %sel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %v0, <4 x i64> %v1
store <4 x i64> %sel, ptr %res
ret void
}
+
+define void @select_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI11_0)
+; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <4 x double>, ptr %a0
+ %v1 = load <4 x double>, ptr %a1
+ %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %v0, <4 x double> %v1
+ store <4 x double> %sel, ptr %res
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
index 8f25a6b..25c4f09 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
@@ -16,6 +16,20 @@ define void @select_v16i8_imm(ptr %res, ptr %a0) nounwind {
ret void
}
+define void @select_v16i8_imm_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: select_v16i8_imm_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vrepli.h $vr1, -256
+; CHECK-NEXT: vbitseli.b $vr1, $vr0, 1
+; CHECK-NEXT: vst $vr1, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <16 x i8>, ptr %a0
+ %sel = select <16 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %v0
+ store <16 x i8> %sel, ptr %res
+ ret void
+}
+
define void @select_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: select_v16i8:
; CHECK: # %bb.0:
@@ -32,6 +46,40 @@ define void @select_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
ret void
}
+define void @select_v16i8_1(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v16i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <16 x i8>, ptr %a0
+ %v1 = load <16 x i8>, ptr %a1
+ %sel = select <16 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> %v0, <16 x i8> %v1
+ store <16 x i8> %sel, ptr %res
+ ret void
+}
+
+define void @select_v16i8_2(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v16i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI4_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI4_0)
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <16 x i8>, ptr %a0
+ %v1 = load <16 x i8>, ptr %a1
+ %sel = select <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>, <16 x i8> %v0, <16 x i8> %v1
+ store <16 x i8> %sel, ptr %res
+ ret void
+}
+
define void @select_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: select_v8i16:
; CHECK: # %bb.0:
@@ -49,6 +97,40 @@ define void @select_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
ret void
}
+define void @select_v8i16_1(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI6_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI6_0)
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <8 x i16>, ptr %a0
+ %v1 = load <8 x i16>, ptr %a1
+ %sel = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %v0, <8 x i16> %v1
+ store <8 x i16> %sel, ptr %res
+ ret void
+}
+
+define void @select_v8i16_2(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI7_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI7_0)
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <8 x i16>, ptr %a0
+ %v1 = load <8 x i16>, ptr %a1
+ %sel = select <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %v0, <8 x i16> %v1
+ store <8 x i16> %sel, ptr %res
+ ret void
+}
+
define void @select_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: select_v4i32:
; CHECK: # %bb.0:
@@ -65,13 +147,47 @@ define void @select_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
ret void
}
+define void @select_v4i32_1(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v4i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI9_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI9_0)
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <4 x i32>, ptr %a0
+ %v1 = load <4 x i32>, ptr %a1
+ %sel = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> %v0, <4 x i32> %v1
+ store <4 x i32> %sel, ptr %res
+ ret void
+}
+
+define void @select_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI10_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI10_0)
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <4 x float>, ptr %a0
+ %v1 = load <4 x float>, ptr %a1
+ %sel = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %v0, <4 x float> %v1
+ store <4 x float> %sel, ptr %res
+ ret void
+}
+
define void @select_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: select_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vld $vr0, $a1, 0
; CHECK-NEXT: vld $vr1, $a2, 0
-; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI4_0)
-; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI4_0)
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI11_0)
; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
@@ -81,3 +197,20 @@ define void @select_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
store <2 x i64> %sel, ptr %res
ret void
}
+
+define void @select_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: select_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI12_0)
+; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI12_0)
+; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+ %v0 = load <2 x double>, ptr %a0
+ %v1 = load <2 x double>, ptr %a1
+ %sel = select <2 x i1> <i1 false, i1 true>, <2 x double> %v0, <2 x double> %v1
+ store <2 x double> %sel, ptr %res
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
index e71f59c..cad684e 100644
--- a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
+++ b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
@@ -325,24 +325,21 @@ define float @sqrt_afn_ieee(float %x) #0 {
;
; GLOBAL-LABEL: sqrt_afn_ieee:
; GLOBAL: # %bb.0:
-; GLOBAL-NEXT: addis 3, 2, .LCPI11_1@toc@ha
-; GLOBAL-NEXT: xsabsdp 0, 1
-; GLOBAL-NEXT: lfs 2, .LCPI11_1@toc@l(3)
-; GLOBAL-NEXT: fcmpu 0, 0, 2
-; GLOBAL-NEXT: xxlxor 0, 0, 0
-; GLOBAL-NEXT: blt 0, .LBB11_2
-; GLOBAL-NEXT: # %bb.1:
; GLOBAL-NEXT: xsrsqrtesp 0, 1
; GLOBAL-NEXT: vspltisw 2, -3
; GLOBAL-NEXT: addis 3, 2, .LCPI11_0@toc@ha
-; GLOBAL-NEXT: xvcvsxwdp 2, 34
-; GLOBAL-NEXT: xsmulsp 1, 1, 0
-; GLOBAL-NEXT: xsmaddasp 2, 1, 0
+; GLOBAL-NEXT: xvcvsxwdp 3, 34
+; GLOBAL-NEXT: xsmulsp 2, 1, 0
+; GLOBAL-NEXT: xsabsdp 1, 1
+; GLOBAL-NEXT: xsmaddasp 3, 2, 0
; GLOBAL-NEXT: lfs 0, .LCPI11_0@toc@l(3)
-; GLOBAL-NEXT: xsmulsp 0, 1, 0
-; GLOBAL-NEXT: xsmulsp 0, 0, 2
-; GLOBAL-NEXT: .LBB11_2:
-; GLOBAL-NEXT: fmr 1, 0
+; GLOBAL-NEXT: addis 3, 2, .LCPI11_1@toc@ha
+; GLOBAL-NEXT: xsmulsp 0, 2, 0
+; GLOBAL-NEXT: lfs 2, .LCPI11_1@toc@l(3)
+; GLOBAL-NEXT: xssubsp 1, 1, 2
+; GLOBAL-NEXT: xxlxor 2, 2, 2
+; GLOBAL-NEXT: xsmulsp 0, 0, 3
+; GLOBAL-NEXT: fsel 1, 1, 0, 2
; GLOBAL-NEXT: blr
%rt = call afn ninf float @llvm.sqrt.f32(float %x)
ret float %rt
@@ -393,21 +390,19 @@ define float @sqrt_afn_preserve_sign(float %x) #1 {
;
; GLOBAL-LABEL: sqrt_afn_preserve_sign:
; GLOBAL: # %bb.0:
-; GLOBAL-NEXT: xxlxor 0, 0, 0
-; GLOBAL-NEXT: fcmpu 0, 1, 0
-; GLOBAL-NEXT: beq 0, .LBB13_2
-; GLOBAL-NEXT: # %bb.1:
; GLOBAL-NEXT: xsrsqrtesp 0, 1
; GLOBAL-NEXT: vspltisw 2, -3
; GLOBAL-NEXT: addis 3, 2, .LCPI13_0@toc@ha
-; GLOBAL-NEXT: xvcvsxwdp 2, 34
-; GLOBAL-NEXT: xsmulsp 1, 1, 0
-; GLOBAL-NEXT: xsmaddasp 2, 1, 0
+; GLOBAL-NEXT: xvcvsxwdp 3, 34
+; GLOBAL-NEXT: xsmulsp 2, 1, 0
+; GLOBAL-NEXT: xsmaddasp 3, 2, 0
; GLOBAL-NEXT: lfs 0, .LCPI13_0@toc@l(3)
-; GLOBAL-NEXT: xsmulsp 0, 1, 0
-; GLOBAL-NEXT: xsmulsp 0, 0, 2
-; GLOBAL-NEXT: .LBB13_2:
-; GLOBAL-NEXT: fmr 1, 0
+; GLOBAL-NEXT: xsmulsp 0, 2, 0
+; GLOBAL-NEXT: xxlxor 2, 2, 2
+; GLOBAL-NEXT: xsmulsp 0, 0, 3
+; GLOBAL-NEXT: fsel 2, 1, 2, 0
+; GLOBAL-NEXT: xsnegdp 1, 1
+; GLOBAL-NEXT: fsel 1, 1, 2, 0
; GLOBAL-NEXT: blr
%rt = call afn ninf float @llvm.sqrt.f32(float %x)
ret float %rt
@@ -462,24 +457,21 @@ define float @sqrt_fast_ieee(float %x) #0 {
;
; GLOBAL-LABEL: sqrt_fast_ieee:
; GLOBAL: # %bb.0:
-; GLOBAL-NEXT: addis 3, 2, .LCPI15_1@toc@ha
-; GLOBAL-NEXT: xsabsdp 0, 1
-; GLOBAL-NEXT: lfs 2, .LCPI15_1@toc@l(3)
-; GLOBAL-NEXT: fcmpu 0, 0, 2
-; GLOBAL-NEXT: xxlxor 0, 0, 0
-; GLOBAL-NEXT: blt 0, .LBB15_2
-; GLOBAL-NEXT: # %bb.1:
; GLOBAL-NEXT: xsrsqrtesp 0, 1
; GLOBAL-NEXT: vspltisw 2, -3
; GLOBAL-NEXT: addis 3, 2, .LCPI15_0@toc@ha
-; GLOBAL-NEXT: xvcvsxwdp 2, 34
-; GLOBAL-NEXT: xsmulsp 1, 1, 0
-; GLOBAL-NEXT: xsmaddasp 2, 1, 0
+; GLOBAL-NEXT: xvcvsxwdp 3, 34
+; GLOBAL-NEXT: xsmulsp 2, 1, 0
+; GLOBAL-NEXT: xsabsdp 1, 1
+; GLOBAL-NEXT: xsmaddasp 3, 2, 0
; GLOBAL-NEXT: lfs 0, .LCPI15_0@toc@l(3)
-; GLOBAL-NEXT: xsmulsp 0, 1, 0
-; GLOBAL-NEXT: xsmulsp 0, 0, 2
-; GLOBAL-NEXT: .LBB15_2:
-; GLOBAL-NEXT: fmr 1, 0
+; GLOBAL-NEXT: addis 3, 2, .LCPI15_1@toc@ha
+; GLOBAL-NEXT: xsmulsp 0, 2, 0
+; GLOBAL-NEXT: lfs 2, .LCPI15_1@toc@l(3)
+; GLOBAL-NEXT: xssubsp 1, 1, 2
+; GLOBAL-NEXT: xxlxor 2, 2, 2
+; GLOBAL-NEXT: xsmulsp 0, 0, 3
+; GLOBAL-NEXT: fsel 1, 1, 0, 2
; GLOBAL-NEXT: blr
%rt = call contract reassoc afn ninf float @llvm.sqrt.f32(float %x)
ret float %rt
@@ -517,21 +509,19 @@ define float @sqrt_fast_preserve_sign(float %x) #1 {
;
; GLOBAL-LABEL: sqrt_fast_preserve_sign:
; GLOBAL: # %bb.0:
-; GLOBAL-NEXT: xxlxor 0, 0, 0
-; GLOBAL-NEXT: fcmpu 0, 1, 0
-; GLOBAL-NEXT: beq 0, .LBB16_2
-; GLOBAL-NEXT: # %bb.1:
; GLOBAL-NEXT: xsrsqrtesp 0, 1
; GLOBAL-NEXT: vspltisw 2, -3
; GLOBAL-NEXT: addis 3, 2, .LCPI16_0@toc@ha
-; GLOBAL-NEXT: xvcvsxwdp 2, 34
-; GLOBAL-NEXT: xsmulsp 1, 1, 0
-; GLOBAL-NEXT: xsmaddasp 2, 1, 0
+; GLOBAL-NEXT: xvcvsxwdp 3, 34
+; GLOBAL-NEXT: xsmulsp 2, 1, 0
+; GLOBAL-NEXT: xsmaddasp 3, 2, 0
; GLOBAL-NEXT: lfs 0, .LCPI16_0@toc@l(3)
-; GLOBAL-NEXT: xsmulsp 0, 1, 0
-; GLOBAL-NEXT: xsmulsp 0, 0, 2
-; GLOBAL-NEXT: .LBB16_2:
-; GLOBAL-NEXT: fmr 1, 0
+; GLOBAL-NEXT: xsmulsp 0, 2, 0
+; GLOBAL-NEXT: xxlxor 2, 2, 2
+; GLOBAL-NEXT: xsmulsp 0, 0, 3
+; GLOBAL-NEXT: fsel 2, 1, 2, 0
+; GLOBAL-NEXT: xsnegdp 1, 1
+; GLOBAL-NEXT: fsel 1, 1, 2, 0
; GLOBAL-NEXT: blr
%rt = call contract reassoc ninf afn float @llvm.sqrt.f32(float %x)
ret float %rt
diff --git a/llvm/test/CodeGen/PowerPC/lxvkq-vec-constant.ll b/llvm/test/CodeGen/PowerPC/lxvkq-vec-constant.ll
new file mode 100644
index 0000000..0ee4524
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/lxvkq-vec-constant.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC64-LE-10
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64-unknown-unknown \
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC64-BE-10
+
+; Test LXVKQ instruction generation for special vector constants matching 128-bit patterns:
+; 0x8000_0000_0000_0000_0000_0000_0000_0000 (MSB set pattern)
+; 0x0000_0000_0000_0000_0000_0000_0000_0001 (LSB set pattern)
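+;
+; Note: only the layout whose full 128-bit in-memory image matches one of these
+; patterns is expected to fold to a single-instruction sequence; when the
+; element order does not match (the opposite endianness), the constant is
+; instead loaded from the constant pool (plxv on LE, lxv via the TOC on BE),
+; as the checks below show.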
+
+; =============================================================================
+; v2i64 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000)
+; =============================================================================
+
+; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-9223372036854775808, 0>
+define dso_local noundef <2 x i64> @test_v2i64_msb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v2i64_msb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI0_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v2i64_msb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: lxvkq v2, 16
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <2 x i64> <i64 -9223372036854775808, i64 0>
+}
+
+; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, -9223372036854775808>
+define dso_local noundef <2 x i64> @test_v2i64_msb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v2i64_msb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: lxvkq v2, 16
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v2i64_msb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI1_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI1_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <2 x i64> <i64 0, i64 -9223372036854775808>
+}
+
+; =============================================================================
+; v4i32 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000)
+; =============================================================================
+
+; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-2147483648, 0, 0, 0>
+define dso_local noundef <4 x i32> @test_v4i32_msb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v4i32_msb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI2_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v4i32_msb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: lxvkq v2, 16
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <4 x i32> <i32 -2147483648, i32 0, i32 0, i32 0>
+}
+
+; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, 0, 0, -2147483648>
+define dso_local noundef <4 x i32> @test_v4i32_msb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v4i32_msb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: lxvkq v2, 16
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v4i32_msb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI3_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI3_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <4 x i32> <i32 0, i32 0, i32 0, i32 -2147483648>
+}
+
+; =============================================================================
+; v8i16 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000)
+; =============================================================================
+
+; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-32768, 0, 0, 0, 0, 0, 0, 0>
+define dso_local noundef <8 x i16> @test_v8i16_msb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v8i16_msb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI4_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v8i16_msb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: lxvkq v2, 16
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <8 x i16> <i16 -32768, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+}
+
+; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, 0, 0, 0, 0, 0, 0, -32768>
+define dso_local noundef <8 x i16> @test_v8i16_msb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v8i16_msb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: lxvkq v2, 16
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v8i16_msb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI5_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI5_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 -32768>
+}
+
+; =============================================================================
+; v16i8 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000)
+; =============================================================================
+
+; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>
+define dso_local noundef <16 x i8> @test_v16i8_msb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v16i8_msb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI6_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v16i8_msb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: lxvkq v2, 16
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <16 x i8> <i8 -128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+}
+
+; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128>
+define dso_local noundef <16 x i8> @test_v16i8_msb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v16i8_msb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: lxvkq v2, 16
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v16i8_msb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI7_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI7_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 -128>
+}
+
+; =============================================================================
+; v2i64 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001)
+; =============================================================================
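+; Note: the LSB-set pattern is materialized without lxvkq: xxspltib 255 splats
+; all-ones, and vsrq then shifts the quadword right (an all-ones shift operand
+; acts as a 127-bit shift), leaving only the least significant bit set.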
+
+; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 1>
+define dso_local noundef <2 x i64> @test_v2i64_lsb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v2i64_lsb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI8_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v2i64_lsb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: xxspltib v2, 255
+; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <2 x i64> <i64 0, i64 1>
+}
+
+; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0>
+define dso_local noundef <2 x i64> @test_v2i64_lsb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v2i64_lsb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: xxspltib v2, 255
+; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v2i64_lsb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI9_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI9_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <2 x i64> <i64 1, i64 0>
+}
+
+; =============================================================================
+; v4i32 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001)
+; =============================================================================
+
+; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 0, 0, 1>
+define dso_local noundef <4 x i32> @test_v4i32_lsb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v4i32_lsb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI10_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v4i32_lsb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: xxspltib v2, 255
+; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+}
+
+; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0, 0, 0>
+define dso_local noundef <4 x i32> @test_v4i32_lsb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v4i32_lsb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: xxspltib v2, 255
+; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v4i32_lsb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI11_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI11_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+}
+
+; =============================================================================
+; v8i16 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001)
+; =============================================================================
+
+; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 0, 0, 0, 0, 0, 0, 1>
+define dso_local noundef <8 x i16> @test_v8i16_lsb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v8i16_lsb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI12_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v8i16_lsb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: xxspltib v2, 255
+; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 1>
+}
+
+; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0, 0, 0, 0, 0, 0, 0>
+define dso_local noundef <8 x i16> @test_v8i16_lsb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v8i16_lsb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: xxspltib v2, 255
+; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v8i16_lsb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI13_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI13_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <8 x i16> <i16 1, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+}
+
+; =============================================================================
+; v16i8 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001)
+; =============================================================================
+
+; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1>
+define dso_local noundef <16 x i8> @test_v16i8_lsb_set_bigendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v16i8_lsb_set_bigendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: plxv v2, .LCPI14_0@PCREL(0), 1
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v16i8_lsb_set_bigendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: xxspltib v2, 255
+; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 1>
+}
+
+; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>
+define dso_local noundef <16 x i8> @test_v16i8_lsb_set_littleendian() local_unnamed_addr {
+; POWERPC64-LE-10-LABEL: test_v16i8_lsb_set_littleendian:
+; POWERPC64-LE-10: # %bb.0: # %entry
+; POWERPC64-LE-10-NEXT: xxspltib v2, 255
+; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2
+; POWERPC64-LE-10-NEXT: blr
+;
+; POWERPC64-BE-10-LABEL: test_v16i8_lsb_set_littleendian:
+; POWERPC64-BE-10: # %bb.0: # %entry
+; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI15_0@toc@ha
+; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI15_0@toc@l
+; POWERPC64-BE-10-NEXT: lxv v2, 0(r3)
+; POWERPC64-BE-10-NEXT: blr
+entry:
+ ret <16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll b/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll
index 0892210..d506d20 100644
--- a/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll
@@ -1566,12 +1566,16 @@ define dso_local i64 @v16i8tov16i64_sign(<16 x i8> %a) local_unnamed_addr #0 {
; PWR10BE-LABEL: v16i8tov16i64_sign:
; PWR10BE: # %bb.0: # %entry
; PWR10BE-NEXT: addis r3, r2, .LCPI23_0@toc@ha
+; PWR10BE-NEXT: xxspltib v1, 255
; PWR10BE-NEXT: addi r3, r3, .LCPI23_0@toc@l
+; PWR10BE-NEXT: vsrq v1, v1, v1
; PWR10BE-NEXT: lxv v3, 0(r3)
; PWR10BE-NEXT: addis r3, r2, .LCPI23_1@toc@ha
; PWR10BE-NEXT: addi r3, r3, .LCPI23_1@toc@l
+; PWR10BE-NEXT: vperm v1, v2, v2, v1
; PWR10BE-NEXT: lxv v4, 0(r3)
; PWR10BE-NEXT: addis r3, r2, .LCPI23_2@toc@ha
+; PWR10BE-NEXT: vextsb2d v1, v1
; PWR10BE-NEXT: vperm v3, v2, v2, v3
; PWR10BE-NEXT: addi r3, r3, .LCPI23_2@toc@l
; PWR10BE-NEXT: vextsb2d v3, v3
@@ -1585,23 +1589,18 @@ define dso_local i64 @v16i8tov16i64_sign(<16 x i8> %a) local_unnamed_addr #0 {
; PWR10BE-NEXT: vperm v5, v2, v2, v5
; PWR10BE-NEXT: addi r3, r3, .LCPI23_4@toc@l
; PWR10BE-NEXT: vextsb2d v5, v5
-; PWR10BE-NEXT: lxv v1, 0(r3)
+; PWR10BE-NEXT: lxv v6, 0(r3)
; PWR10BE-NEXT: addis r3, r2, .LCPI23_5@toc@ha
; PWR10BE-NEXT: vperm v0, v2, v2, v0
; PWR10BE-NEXT: addi r3, r3, .LCPI23_5@toc@l
; PWR10BE-NEXT: vextsb2d v0, v0
-; PWR10BE-NEXT: lxv v6, 0(r3)
+; PWR10BE-NEXT: lxv v7, 0(r3)
; PWR10BE-NEXT: addis r3, r2, .LCPI23_6@toc@ha
-; PWR10BE-NEXT: vperm v1, v2, v2, v1
+; PWR10BE-NEXT: vperm v6, v2, v2, v6
; PWR10BE-NEXT: vaddudm v5, v0, v5
; PWR10BE-NEXT: vaddudm v3, v4, v3
; PWR10BE-NEXT: vaddudm v3, v3, v5
; PWR10BE-NEXT: addi r3, r3, .LCPI23_6@toc@l
-; PWR10BE-NEXT: vextsb2d v1, v1
-; PWR10BE-NEXT: lxv v7, 0(r3)
-; PWR10BE-NEXT: addis r3, r2, .LCPI23_7@toc@ha
-; PWR10BE-NEXT: vperm v6, v2, v2, v6
-; PWR10BE-NEXT: addi r3, r3, .LCPI23_7@toc@l
; PWR10BE-NEXT: vextsb2d v6, v6
; PWR10BE-NEXT: lxv v8, 0(r3)
; PWR10BE-NEXT: vperm v7, v2, v2, v7
@@ -1609,7 +1608,7 @@ define dso_local i64 @v16i8tov16i64_sign(<16 x i8> %a) local_unnamed_addr #0 {
; PWR10BE-NEXT: vperm v2, v2, v2, v8
; PWR10BE-NEXT: vextsb2d v2, v2
; PWR10BE-NEXT: vaddudm v2, v2, v7
-; PWR10BE-NEXT: vaddudm v4, v6, v1
+; PWR10BE-NEXT: vaddudm v4, v1, v6
; PWR10BE-NEXT: vaddudm v2, v4, v2
; PWR10BE-NEXT: vaddudm v2, v2, v3
; PWR10BE-NEXT: xxswapd v3, v2
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll
index 24a1724..ba7680b 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll
@@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_or_BC_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_or_BC_eqv_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 151
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -34,12 +32,10 @@ define <2 x i64> @ternary_A_or_BC_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_or_BC_eqv_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 151
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -54,11 +50,9 @@ define <16 x i8> @ternary_A_or_BC_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_or_BC_eqv_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 151
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -73,11 +67,9 @@ define <8 x i16> @ternary_A_or_BC_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_or_BC_eqv_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 151
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -92,11 +84,9 @@ define <4 x i32> @ternary_A_nor_BC_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 152
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -112,12 +102,10 @@ define <2 x i64> @ternary_A_nor_BC_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 152
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -133,11 +121,9 @@ define <16 x i8> @ternary_A_nor_BC_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 152
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -153,11 +139,9 @@ define <8 x i16> @ternary_A_nor_BC_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 152
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -173,10 +157,9 @@ define <4 x i32> @ternary_A_not_C_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_not_C_eqv_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxeval v2, v2, vs0, v3, 99
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 154
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -191,12 +174,10 @@ define <2 x i64> @ternary_A_not_C_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_not_C_eqv_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxleqv vs1, v4, v3
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 154
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation
@@ -211,11 +192,9 @@ define <16 x i8> @ternary_A_not_C_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_not_C_eqv_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxleqv vs1, v4, v3
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 154
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -230,11 +209,9 @@ define <8 x i16> @ternary_A_not_C_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_not_C_eqv_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxleqv vs1, v4, v3
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 154
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
@@ -249,11 +226,9 @@ define <4 x i32> @ternary_A_nand_BC_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x
; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 158
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -269,12 +244,10 @@ define <2 x i64> @ternary_A_nand_BC_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x
; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 158
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -290,11 +263,9 @@ define <16 x i8> @ternary_A_nand_BC_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16
; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 158
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -310,11 +281,9 @@ define <8 x i16> @ternary_A_nand_BC_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x
; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxleqv vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 158
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll
index 7a6733d3..067b089 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll
@@ -15,10 +15,9 @@ define <4 x i32> @ternary_A_B_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_B_nand_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 227
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -32,11 +31,10 @@ define <2 x i64> @ternary_A_B_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_B_nand_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 227
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -50,10 +48,9 @@ define <16 x i8> @ternary_A_B_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_B_nand_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 227
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -67,10 +64,9 @@ define <8 x i16> @ternary_A_B_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_B_nand_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 227
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -84,10 +80,9 @@ define <4 x i32> @ternary_A_C_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_C_nand_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 229
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -101,11 +96,10 @@ define <2 x i64> @ternary_A_C_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_C_nand_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 229
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -119,10 +113,9 @@ define <16 x i8> @ternary_A_C_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_C_nand_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 229
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -136,10 +129,9 @@ define <8 x i16> @ternary_A_C_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_C_nand_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 229
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -153,11 +145,9 @@ define <4 x i32> @ternary_A_xor_BC_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x
; CHECK-LABEL: ternary_A_xor_BC_nand_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 230
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -172,12 +162,10 @@ define <2 x i64> @ternary_A_xor_BC_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x
; CHECK-LABEL: ternary_A_xor_BC_nand_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 230
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -192,11 +180,9 @@ define <16 x i8> @ternary_A_xor_BC_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16
; CHECK-LABEL: ternary_A_xor_BC_nand_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 230
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -211,11 +197,9 @@ define <8 x i16> @ternary_A_xor_BC_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x
; CHECK-LABEL: ternary_A_xor_BC_nand_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 230
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -230,11 +214,9 @@ define <4 x i32> @ternary_A_or_BC_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_or_BC_nand_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 231
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -249,12 +231,10 @@ define <2 x i64> @ternary_A_or_BC_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_or_BC_nand_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 231
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -269,11 +249,9 @@ define <16 x i8> @ternary_A_or_BC_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_or_BC_nand_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 231
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -288,11 +266,9 @@ define <8 x i16> @ternary_A_or_BC_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_or_BC_nand_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 231
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -307,11 +283,9 @@ define <4 x i32> @ternary_A_eqv_BC_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x
; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 233
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -327,12 +301,10 @@ define <2 x i64> @ternary_A_eqv_BC_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x
; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 233
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -348,11 +320,9 @@ define <16 x i8> @ternary_A_eqv_BC_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16
; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 233
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -368,11 +338,9 @@ define <8 x i16> @ternary_A_eqv_BC_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x
; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxleqv vs0, v3, v4
-; CHECK-NEXT: xxlnand vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 233
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll
index d635952..3695874 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll
@@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_and_BC_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_and_BC_nor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 129
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -34,12 +32,10 @@ define <2 x i64> @ternary_A_and_BC_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_and_BC_nor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 129
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -54,11 +50,9 @@ define <16 x i8> @ternary_A_and_BC_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_and_BC_nor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 129
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -73,11 +67,9 @@ define <8 x i16> @ternary_A_and_BC_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_and_BC_nor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 129
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -92,10 +84,9 @@ define <4 x i32> @ternary_A_B_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_B_nor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 131
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -109,11 +100,10 @@ define <2 x i64> @ternary_A_B_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_B_nor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 131
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -127,10 +117,9 @@ define <16 x i8> @ternary_A_B_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_B_nor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 131
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -144,10 +133,9 @@ define <8 x i16> @ternary_A_B_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_B_nor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 131
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -161,10 +149,9 @@ define <4 x i32> @ternary_A_C_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_C_nor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 133
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -178,11 +165,10 @@ define <2 x i64> @ternary_A_C_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_C_nor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 133
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -196,10 +182,9 @@ define <16 x i8> @ternary_A_C_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_C_nor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 133
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -213,10 +198,9 @@ define <8 x i16> @ternary_A_C_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_C_nor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v4, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 133
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -230,11 +214,9 @@ define <4 x i32> @ternary_A_xor_BC_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_xor_BC_nor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 134
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -249,12 +231,10 @@ define <2 x i64> @ternary_A_xor_BC_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_xor_BC_nor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 134
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -269,11 +249,9 @@ define <16 x i8> @ternary_A_xor_BC_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_xor_BC_nor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 134
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -288,11 +266,9 @@ define <8 x i16> @ternary_A_xor_BC_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_xor_BC_nor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 134
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -307,11 +283,9 @@ define <4 x i32> @ternary_A_not_C_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_not_C_nor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 138
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -326,12 +300,10 @@ define <2 x i64> @ternary_A_not_C_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_not_C_nor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 138
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation
@@ -346,11 +318,9 @@ define <16 x i8> @ternary_A_not_C_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_not_C_nor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 138
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -365,11 +335,9 @@ define <8 x i16> @ternary_A_not_C_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_not_C_nor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v4, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 138
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
@@ -384,11 +352,9 @@ define <4 x i32> @ternary_A_not_B_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_not_B_nor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 140
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %B, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -403,12 +369,10 @@ define <2 x i64> @ternary_A_not_B_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_not_B_nor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 140
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %B, <i64 -1, i64 -1> ; Vector not operation
@@ -423,11 +387,9 @@ define <16 x i8> @ternary_A_not_B_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_not_B_nor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 140
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %B, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -442,11 +404,9 @@ define <8 x i16> @ternary_A_not_B_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_not_B_nor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 140
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %B, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
@@ -461,11 +421,9 @@ define <4 x i32> @ternary_A_nand_BC_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x
; CHECK-LABEL: ternary_A_nand_BC_nor_BC_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 142
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -481,12 +439,10 @@ define <2 x i64> @ternary_A_nand_BC_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x
; CHECK-LABEL: ternary_A_nand_BC_nor_BC_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 142
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -502,11 +458,9 @@ define <16 x i8> @ternary_A_nand_BC_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16
; CHECK-LABEL: ternary_A_nand_BC_nor_BC_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 142
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -522,11 +476,9 @@ define <8 x i16> @ternary_A_nand_BC_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x
; CHECK-LABEL: ternary_A_nand_BC_nor_BC_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 142
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll
index 6203a96..a67d9cf 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll
@@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_and_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_and_BC_not_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 193
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -33,12 +31,10 @@ define <2 x i64> @ternary_A_and_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_and_BC_not_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 193
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -52,11 +48,9 @@ define <16 x i8> @ternary_A_and_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_and_BC_not_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 193
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -70,11 +64,9 @@ define <8 x i16> @ternary_A_and_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_and_BC_not_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 193
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -88,11 +80,9 @@ define <4 x i32> @ternary_A_xor_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_xor_BC_not_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 198
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -106,12 +96,10 @@ define <2 x i64> @ternary_A_xor_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_xor_BC_not_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 198
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -125,11 +113,9 @@ define <16 x i8> @ternary_A_xor_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_xor_BC_not_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 198
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -143,11 +129,9 @@ define <8 x i16> @ternary_A_xor_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_xor_BC_not_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 198
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -161,11 +145,9 @@ define <4 x i32> @ternary_A_or_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32
; CHECK-LABEL: ternary_A_or_BC_not_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 199
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -179,12 +161,10 @@ define <2 x i64> @ternary_A_or_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64
; CHECK-LABEL: ternary_A_or_BC_not_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 199
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -198,11 +178,9 @@ define <16 x i8> @ternary_A_or_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i
; CHECK-LABEL: ternary_A_or_BC_not_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 199
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -216,11 +194,9 @@ define <8 x i16> @ternary_A_or_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16
; CHECK-LABEL: ternary_A_or_BC_not_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 199
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -234,11 +210,9 @@ define <4 x i32> @ternary_A_nand_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_nand_BC_not_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 206
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -253,12 +227,10 @@ define <2 x i64> @ternary_A_nand_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_nand_BC_not_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 206
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -273,11 +245,9 @@ define <16 x i8> @ternary_A_nand_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_nand_BC_not_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 206
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -292,11 +262,9 @@ define <8 x i16> @ternary_A_nand_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_nand_BC_not_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v3, v3
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 206
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll
index 3479d94..98c1f28 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll
@@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_and_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_and_BC_not_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 161
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -33,12 +31,10 @@ define <2 x i64> @ternary_A_and_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_and_BC_not_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 161
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -52,11 +48,9 @@ define <16 x i8> @ternary_A_and_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_and_BC_not_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 161
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -70,11 +64,9 @@ define <8 x i16> @ternary_A_and_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_and_BC_not_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 161
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -88,10 +80,9 @@ define <4 x i32> @ternary_A_B_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %C
; CHECK-LABEL: ternary_A_B_not_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 163
; CHECK-NEXT: blr
entry:
%not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -104,11 +95,10 @@ define <2 x i64> @ternary_A_B_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %C
; CHECK-LABEL: ternary_A_B_not_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v4, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 163
; CHECK-NEXT: blr
entry:
%not = xor <2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation
@@ -121,10 +111,9 @@ define <16 x i8> @ternary_A_B_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8> %
; CHECK-LABEL: ternary_A_B_not_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v4, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 163
; CHECK-NEXT: blr
entry:
%not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -137,10 +126,9 @@ define <8 x i16> @ternary_A_B_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %C
; CHECK-LABEL: ternary_A_B_not_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v4, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs0, v3, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 163
; CHECK-NEXT: blr
entry:
%not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
@@ -153,11 +141,9 @@ define <4 x i32> @ternary_A_xor_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3
; CHECK-LABEL: ternary_A_xor_BC_not_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 166
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -171,12 +157,10 @@ define <2 x i64> @ternary_A_xor_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6
; CHECK-LABEL: ternary_A_xor_BC_not_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 166
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -190,11 +174,9 @@ define <16 x i8> @ternary_A_xor_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_xor_BC_not_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 166
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -208,11 +190,9 @@ define <8 x i16> @ternary_A_xor_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1
; CHECK-LABEL: ternary_A_xor_BC_not_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlxor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 166
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -226,11 +206,9 @@ define <4 x i32> @ternary_A_or_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32
; CHECK-LABEL: ternary_A_or_BC_not_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 167
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -244,12 +222,10 @@ define <2 x i64> @ternary_A_or_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64
; CHECK-LABEL: ternary_A_or_BC_not_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 167
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -263,11 +239,9 @@ define <16 x i8> @ternary_A_or_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i
; CHECK-LABEL: ternary_A_or_BC_not_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 167
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -281,11 +255,9 @@ define <8 x i16> @ternary_A_or_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16
; CHECK-LABEL: ternary_A_or_BC_not_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlor vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 167
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -299,11 +271,9 @@ define <4 x i32> @ternary_A_not_B_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32
; CHECK-LABEL: ternary_A_not_B_not_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 172
; CHECK-NEXT: blr
entry:
%not_b = xor <4 x i32> %B, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation
@@ -317,12 +287,10 @@ define <2 x i64> @ternary_A_not_B_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64
; CHECK-LABEL: ternary_A_not_B_not_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 172
; CHECK-NEXT: blr
entry:
%not_b = xor <2 x i64> %B, <i64 -1, i64 -1> ; Vector not operation
@@ -336,11 +304,9 @@ define <16 x i8> @ternary_A_not_B_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i
; CHECK-LABEL: ternary_A_not_B_not_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 172
; CHECK-NEXT: blr
entry:
%not_b = xor <16 x i8> %B, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation
@@ -354,11 +320,9 @@ define <8 x i16> @ternary_A_not_B_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16
; CHECK-LABEL: ternary_A_not_B_not_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v3
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 172
; CHECK-NEXT: blr
entry:
%not_b = xor <8 x i16> %B, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation
@@ -372,11 +336,9 @@ define <4 x i32> @ternary_A_nand_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i
; CHECK-LABEL: ternary_A_nand_BC_not_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 174
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -391,12 +353,10 @@ define <2 x i64> @ternary_A_nand_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i
; CHECK-LABEL: ternary_A_nand_BC_not_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 174
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -411,11 +371,9 @@ define <16 x i8> @ternary_A_nand_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x
; CHECK-LABEL: ternary_A_nand_BC_not_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 174
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -430,11 +388,9 @@ define <8 x i16> @ternary_A_nand_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i
; CHECK-LABEL: ternary_A_nand_BC_not_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
-; CHECK-NEXT: xxlnor vs1, v4, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, vs1, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 174
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
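The xxeval immediates in these tests encode the selected ternary function as a
truth table: the 8-bit operand lists f(A,B,C) for inputs (0,0,0) through
(1,1,1), most significant bit first. For ternary_A_nand_BC_not_C, for example,
f(a,b,c) = a ? ~(b & c) : ~c gives the rows 1,0,1,0,1,1,1,0, i.e.
0b10101110 = 174, matching the immediate in the checks above.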
diff --git a/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll
new file mode 100644
index 0000000..c19e93d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O1 -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
+
+define i32 @pr134424(i64 %input_value, i32 %base_value, i1 %cond_flag1, i1 %cond_flag2, i1 %cond_flag3) {
+; CHECK-LABEL: pr134424:
+; CHECK: # %bb.0: # %for.body.us.preheader.i
+; CHECK-NEXT: andi a3, a3, 1
+; CHECK-NEXT: andi a5, a2, 1
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 14
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: bnez a5, .LBB0_2
+; CHECK-NEXT: # %bb.1: # %for.body.us.preheader.i
+; CHECK-NEXT: li a2, 1
+; CHECK-NEXT: .LBB0_2: # %for.body.us.preheader.i
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
+; CHECK-NEXT: andi a4, a4, 1
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: bnez a3, .LBB0_4
+; CHECK-NEXT: # %bb.3: # %for.body.us.preheader.i
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: .LBB0_4: # %for.body.us.preheader.i
+; CHECK-NEXT: vmsle.vi v0, v8, 0
+; CHECK-NEXT: sext.w a2, a2
+; CHECK-NEXT: bnez a4, .LBB0_6
+; CHECK-NEXT: # %bb.5: # %for.body.us.preheader.i
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: .LBB0_6: # %for.body.us.preheader.i
+; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vredmin.vs v8, v8, v8
+; CHECK-NEXT: vmv.x.s a3, v8
+; CHECK-NEXT: sext.w a1, a1
+; CHECK-NEXT: bge a3, a2, .LBB0_11
+; CHECK-NEXT: # %bb.7: # %for.body.us.preheader.i
+; CHECK-NEXT: bge a0, a1, .LBB0_12
+; CHECK-NEXT: .LBB0_8: # %for.body.us.preheader.i
+; CHECK-NEXT: blt a3, a0, .LBB0_10
+; CHECK-NEXT: .LBB0_9: # %for.body.us.preheader.i
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: .LBB0_10: # %for.body.us.preheader.i
+; CHECK-NEXT: sw a3, 0(zero)
+; CHECK-NEXT: li a0, 0
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_11: # %for.body.us.preheader.i
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: blt a0, a1, .LBB0_8
+; CHECK-NEXT: .LBB0_12: # %for.body.us.preheader.i
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: bge a3, a0, .LBB0_9
+; CHECK-NEXT: j .LBB0_10
+for.body.us.preheader.i:
+ %partial_vector = insertelement <4 x i64> zeroinitializer, i64 %input_value, i64 1
+ %comparison_vector = shufflevector <4 x i64> %partial_vector, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 1, i32 1>
+ %comparison_result = icmp sle <4 x i64> %comparison_vector, zeroinitializer
+ %selected_value1 = select i1 %cond_flag1, i32 %base_value, i32 1
+ %selected_value2 = select i1 %cond_flag2, i32 %base_value, i32 1
+ %selected_value3 = select i1 %cond_flag3, i32 %base_value, i32 1
+ %bool_to_int = zext <4 x i1> %comparison_result to <4 x i32>
+ %extended_vector = shufflevector <4 x i32> %bool_to_int, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+ %vector_min = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %extended_vector)
+ %min1 = call i32 @llvm.smin.i32(i32 %vector_min, i32 %selected_value1)
+ %min2 = call i32 @llvm.smin.i32(i32 %selected_value2, i32 %selected_value3)
+ %final_min = call i32 @llvm.smin.i32(i32 %min1, i32 %min2)
+ store i32 %final_min, ptr null, align 4
+ ret i32 0
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.mir b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.mir
new file mode 100644
index 0000000..aeab8f6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=register-coalescer -o - %s | FileCheck %s
+
+---
+name: pr71023
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: pr71023
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x10, $v8, $v10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[PseudoVMV_V_I_M1_:%[0-9]+]].sub_vrm1_2:vrn8m1 = PseudoVMV_V_I_M1 undef [[PseudoVMV_V_I_M1_]].sub_vrm1_2, 0, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]].sub_vrm1_6:vrn8m1 = COPY undef [[PseudoVMV_V_I_M1_]].sub_vrm1_2
+ ; CHECK-NEXT: BNE undef [[DEF]], $x0, %bb.3
+ ; CHECK-NEXT: PseudoBR %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: BNE undef [[DEF]], $x0, %bb.3
+ ; CHECK-NEXT: PseudoBR %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: dead [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; CHECK-NEXT: early-clobber [[PseudoVMV_V_I_M1_]].sub_vrm1_0:vrn8m1 = PseudoVRGATHER_VI_M1 undef [[PseudoVMV_V_I_M1_]].sub_vrm1_0, [[PseudoVMV_V_I_M1_]].sub_vrm1_2, 0, 0, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: PseudoVSSEG6E8_V_M1_MASK [[PseudoVMV_V_I_M1_]].sub_vrm1_0_sub_vrm1_1_sub_vrm1_2_sub_vrm1_3_sub_vrm1_4_sub_vrm1_5, undef [[DEF]], killed undef $v0, 0, 3 /* e8 */, implicit $vl, implicit $vtype :: (store unknown-size, align 1)
+ ; CHECK-NEXT: PseudoRET
+ bb.0:
+ successors: %bb.3(0x40000000), %bb.1(0x40000000)
+ liveins: $x10, $v8, $v10
+ %0:gpr = IMPLICIT_DEF
+ %1:vrnov0 = PseudoVMV_V_I_M1 undef %1, 0, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ %2:vrnov0 = IMPLICIT_DEF
+ undef %3.sub_vrm1_0:vrn6m1nov0 = COPY undef %1
+ %3.sub_vrm1_3:vrn6m1nov0 = COPY %2
+ %3.sub_vrm1_4:vrn6m1nov0 = COPY undef %1
+ BNE undef %0, $x0, %bb.3
+ PseudoBR %bb.1
+ bb.1:
+ successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ BNE killed undef %0, $x0, %bb.3
+ PseudoBR %bb.2
+ bb.2:
+ successors: %bb.3(0x80000000)
+ bb.3:
+ %4:vr = IMPLICIT_DEF
+ early-clobber %4:vr = PseudoVRGATHER_VI_M1 undef %4, killed %1, 0, 0, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ undef %5.sub_vrm1_0:vrn6m1 = COPY killed %4
+ %5.sub_vrm1_5:vrn6m1 = COPY killed %2
+ PseudoVSSEG6E8_V_M1_MASK killed %5, undef %0, killed undef $v0, 0, 3 /* e8 */, implicit $vl, implicit $vtype :: (store unknown-size, align 1)
+ PseudoRET
+...
diff --git a/llvm/test/CodeGen/SPIRV/FCmpFalse.ll b/llvm/test/CodeGen/SPIRV/FCmpFalse.ll
new file mode 100644
index 0000000..55d64196
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/FCmpFalse.ll
@@ -0,0 +1,10 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: %[[#FalseVal:]] = OpConstantFalse %[[#]]
+; CHECK: OpReturnValue %[[#FalseVal]]
+
+define spir_func i1 @f(float %0) {
+ %2 = fcmp false float %0, %0
+ ret i1 %2
+}
diff --git a/llvm/test/CodeGen/SPIRV/FCmpFalse_Vec.ll b/llvm/test/CodeGen/SPIRV/FCmpFalse_Vec.ll
new file mode 100644
index 0000000..c410b64
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/FCmpFalse_Vec.ll
@@ -0,0 +1,13 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: %[[#BoolTy:]] = OpTypeBool
+; CHECK: %[[#VecTy:]] = OpTypeVector %[[#BoolTy]] 4
+; CHECK: %[[#False:]] = OpConstantFalse %[[#BoolTy]]
+; CHECK: %[[#Composite:]] = OpConstantComposite %[[#VecTy]] %[[#False]] %[[#False]] %[[#False]] %[[#False]]
+; CHECK: OpReturnValue %[[#Composite]]
+
+define spir_func <4 x i1> @test(<4 x float> %a) {
+ %compare = fcmp false <4 x float> %a, %a
+ ret <4 x i1> %compare
+}
diff --git a/llvm/test/CodeGen/SPIRV/builtin_duplicate.ll b/llvm/test/CodeGen/SPIRV/builtin_duplicate.ll
new file mode 100644
index 0000000..8786554
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/builtin_duplicate.ll
@@ -0,0 +1,20 @@
+;; This test checks if we generate a single builtin variable for the following
+;; LLVM IR.
+;; @__spirv_BuiltInLocalInvocationId - A global variable
+;; %3 = tail call i64 @_Z12get_local_idj(i32 0) - A function call
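+;; Both forms lower to the same SPIR-V built-in, so the backend must reuse the
+;; existing variable rather than emit a duplicate with a ".1" suffix (see the
+;; CHECK-NOT below).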
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpName %[[#]] "__spirv_BuiltInLocalInvocationId"
+; CHECK-NOT: OpName %[[#]] "__spirv_BuiltInLocalInvocationId.1"
+
+@__spirv_BuiltInLocalInvocationId = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32
+
+declare spir_func i64 @_Z12get_local_idj(i32) local_unnamed_addr
+
+define spir_kernel void @test(i32 %a) {
+entry:
+ %builtin_call = tail call i64 @_Z12get_local_idj(i32 0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/complex-constexpr.ll b/llvm/test/CodeGen/SPIRV/complex-constexpr.ll
new file mode 100644
index 0000000..e2c1d00
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/complex-constexpr.ll
@@ -0,0 +1,21 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+@.str.1 = private unnamed_addr addrspace(1) constant [1 x i8] zeroinitializer, align 1
+
+define linkonce_odr hidden spir_func void @test() {
+entry:
+; CHECK: %[[#MinusOne:]] = OpConstant %[[#]] 18446744073709551615
+; CHECK: %[[#Ptr:]] = OpConvertUToPtr %[[#]] %[[#MinusOne]]
+; CHECK: %[[#PtrCast:]] = OpPtrCastToGeneric %[[#]] %[[#]]
+; CHECK: %[[#]] = OpFunctionCall %[[#]] %[[#]] %[[#PtrCast]] %[[#Ptr]]
+
+ %cast = bitcast ptr addrspace(4) inttoptr (i64 -1 to ptr addrspace(4)) to ptr addrspace(4)
+ call spir_func void @bar(ptr addrspace(4) addrspacecast (ptr addrspace(1) @.str.1 to ptr addrspace(4)), ptr addrspace(4) %cast)
+ ret void
+}
+
+define linkonce_odr hidden spir_func void @bar(ptr addrspace(4) %begin, ptr addrspace(4) %end) {
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/dominator-order.ll b/llvm/test/CodeGen/SPIRV/dominator-order.ll
new file mode 100644
index 0000000..2ecdddc
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/dominator-order.ll
@@ -0,0 +1,25 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; This test checks that basic blocks are reordered in SPIR-V so that dominators
+; are emitted ahead of their dominated blocks as required by the SPIR-V
+; specification.
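+; In the function below, for.body137.lr.ph precedes for.body in the IR, but
+; its only predecessor is for.body, so for.body dominates it and must be
+; emitted first.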
+
+; CHECK-DAG: OpName %[[#ENTRY:]] "entry"
+; CHECK-DAG: OpName %[[#FOR_BODY137_LR_PH:]] "for.body137.lr.ph"
+; CHECK-DAG: OpName %[[#FOR_BODY:]] "for.body"
+
+; CHECK: %[[#ENTRY]] = OpLabel
+; CHECK: %[[#FOR_BODY]] = OpLabel
+; CHECK: %[[#FOR_BODY137_LR_PH]] = OpLabel
+
+define spir_kernel void @test(ptr addrspace(1) %arg, i1 %cond) {
+entry:
+ br label %for.body
+
+for.body137.lr.ph: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ br i1 %cond, label %for.body, label %for.body137.lr.ph
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fake_use.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fake_use.ll
new file mode 100644
index 0000000..5370b51
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fake_use.ll
@@ -0,0 +1,13 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpCapability Addresses
+; CHECK-DAG: OpName %[[#]] "foo"
+
+declare void @llvm.fake.use(...)
+
+define spir_kernel void @foo(ptr addrspace(1) %a) {
+entry:
+ call void (...) @llvm.fake.use(ptr addrspace(1) %a)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchange_cl20.ll b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchange_cl20.ll
new file mode 100644
index 0000000..8357373
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchange_cl20.ll
@@ -0,0 +1,84 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.2-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-NOT: OpCapability Int64Atomics
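+; All atomic operations below are on i32, so the Int64Atomics capability must
+; not be declared.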
+
+; CHECK-DAG: %[[#int:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#int8:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#DeviceScope:]] = OpConstant %[[#int]] 1
+; CHECK-DAG: %[[#SequentiallyConsistent_MS:]] = OpConstant %[[#int]] 16
+; CHECK-DAG: %[[#int_ptr:]] = OpTypePointer Generic %[[#int]]
+; CHECK-DAG: %[[#int_ptr8:]] = OpTypePointer Generic %[[#int8]]
+; CHECK-DAG: %[[#bool:]] = OpTypeBool
+
+define spir_func void @test(ptr addrspace(4) %object, ptr addrspace(4) %expected, i32 %desired) {
+
+; CHECK: %[[#object:]] = OpFunctionParameter %[[#int_ptr8]]
+; CHECK: %[[#expected:]] = OpFunctionParameter %[[#int_ptr8]]
+; CHECK: %[[#desired:]] = OpFunctionParameter %[[#int]]
+
+entry:
+ %object.addr = alloca ptr addrspace(4), align 4
+ %expected.addr = alloca ptr addrspace(4), align 4
+ %desired.addr = alloca i32, align 4
+ %strong_res = alloca i8, align 1
+ %res = alloca i8, align 1
+ %weak_res = alloca i8, align 1
+ store ptr addrspace(4) %object, ptr %object.addr, align 4
+ store ptr addrspace(4) %expected, ptr %expected.addr, align 4
+ store i32 %desired, ptr %desired.addr, align 4
+ %0 = load ptr addrspace(4), ptr %object.addr, align 4
+ %1 = load ptr addrspace(4), ptr %expected.addr, align 4
+ %2 = load i32, ptr %desired.addr, align 4
+
+; CHECK-DAG: OpStore %[[#object_addr:]] %[[#object]]
+; CHECK-DAG: OpStore %[[#expected_addr:]] %[[#expected]]
+; CHECK-DAG: OpStore %[[#desired_addr:]] %[[#desired]]
+
+; CHECK: %[[#Pointer:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#exp:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#Value:]] = OpLoad %[[#int]] %[[#desired_addr]]
+; CHECK: %[[#Comparator:]] = OpLoad %[[#int]] %[[#exp]]
+
+; CHECK: %[[#Result:]] = OpAtomicCompareExchange %[[#int]] %[[#]] %[[#DeviceScope]] %[[#SequentiallyConsistent_MS]] %[[#SequentiallyConsistent_MS]] %[[#Value]] %[[#Comparator]]
+ %call = call spir_func zeroext i1 @_Z30atomic_compare_exchange_strongPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4) %0, ptr addrspace(4) %1, i32 %2)
+
+; CHECK-NEXT: OpStore %[[#exp]] %[[#Result]]
+; CHECK-NEXT: %[[#CallRes:]] = OpIEqual %[[#bool]] %[[#Result]] %[[#Comparator]]
+; CHECK-NOT: %[[#Result]]
+
+ %frombool = zext i1 %call to i8
+ store i8 %frombool, ptr %strong_res, align 1
+ %3 = load i8, ptr %strong_res, align 1
+ %tobool = trunc i8 %3 to i1
+ %lnot = xor i1 %tobool, true
+ %frombool1 = zext i1 %lnot to i8
+ store i8 %frombool1, ptr %res, align 1
+ %4 = load ptr addrspace(4), ptr %object.addr, align 4
+ %5 = load ptr addrspace(4), ptr %expected.addr, align 4
+ %6 = load i32, ptr %desired.addr, align 4
+
+; CHECK: %[[#Pointer:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#exp:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#Value:]] = OpLoad %[[#int]] %[[#desired_addr]]
+; CHECK: %[[#ComparatorWeak:]] = OpLoad %[[#int]] %[[#exp]]
+
+; CHECK: %[[#Result:]] = OpAtomicCompareExchangeWeak %[[#int]] %[[#]] %[[#DeviceScope]] %[[#SequentiallyConsistent_MS]] %[[#SequentiallyConsistent_MS]] %[[#Value]] %[[#ComparatorWeak]]
+ %call2 = call spir_func zeroext i1 @_Z28atomic_compare_exchange_weakPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4) %4, ptr addrspace(4) %5, i32 %6)
+
+; CHECK-NEXT: OpStore %[[#exp]] %[[#Result]]
+; CHECK-NEXT: %[[#CallRes:]] = OpIEqual %[[#bool]] %[[#Result]] %[[#ComparatorWeak]]
+; CHECK-NOT: %[[#Result]]
+
+ %frombool3 = zext i1 %call2 to i8
+ store i8 %frombool3, ptr %weak_res, align 1
+ %7 = load i8, ptr %weak_res, align 1
+ %tobool4 = trunc i8 %7 to i1
+ %lnot5 = xor i1 %tobool4, true
+ %frombool6 = zext i1 %lnot5 to i8
+ store i8 %frombool6, ptr %res, align 1
+ ret void
+}
+
+declare spir_func zeroext i1 @_Z30atomic_compare_exchange_strongPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4), ptr addrspace(4), i32) #1
+declare spir_func zeroext i1 @_Z28atomic_compare_exchange_weakPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4), ptr addrspace(4), i32) #1
diff --git a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
index c6ee804..07fbed9 100644
--- a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
@@ -90,7 +90,7 @@ define i32 @test_tbegin_nofloat4(i32 %pad, ptr %ptr) {
; CHECK: tbegin 0, 65292
; CHECK: ipm %r2
; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
; CHECK: mvhi 0(%r3), 0
; CHECK: br %r14
%res = call i32 @llvm.s390.tbegin.nofloat(ptr null, i32 65292)
@@ -219,7 +219,7 @@ define i32 @test_tend2(i32 %pad, ptr %ptr) {
; CHECK: tend
; CHECK: ipm %r2
; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
; CHECK: mvhi 0(%r3), 0
; CHECK: br %r14
%res = call i32 @llvm.s390.tend()
diff --git a/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll
new file mode 100644
index 0000000..6b8746e
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll
@@ -0,0 +1,738 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+; Test the implementation of combining br_ccmask for the flag output operand,
+; and of optimizing the ipm sequence using conditional branches.
+
+declare void @dummy()
+
+; Check a case where cc is used as an integer: just the (srl (ipm)) sequence,
+; without optimization.
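+; The ipm instruction inserts the condition code into bits 34-35 of the
+; result register, so shifting right by 28 leaves cc in the two low-order
+; bits.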
+define i32 @test(ptr %a) {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ipm %r2
+; CHECK-NEXT: srl %r2, 28
+; CHECK-NEXT: br %r14
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ ret i32 %cc
+}
+
+; Test-1(f1_0_*). Test all 14 valid CC-mask combinations where cc is used for
+; branching.
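+; With cc restricted to 0-3, a CC mask has 2^4 = 16 possible values; excluding
+; always-false and always-true leaves the 14 combinations tested here.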
+
+; Check (cc == 0).
+define void @f1_0_eq_0(ptr %a) {
+; CHECK-LABEL: f1_0_eq_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jge dummy@PLT
+; CHECK-NEXT: .LBB1_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 0).
+define void @f1_0_ne_0(ptr %a) {
+; CHECK-LABEL: f1_0_ne_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgne dummy@PLT
+; CHECK-NEXT: .LBB2_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ugt i32 %cc, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1).
+define void @f1_0_eq_1(ptr %a) {
+; CHECK-LABEL: f1_0_eq_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgl dummy@PLT
+; CHECK-NEXT: .LBB3_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 1).
+define void @f1_0_ne_1(ptr %a) {
+; CHECK-LABEL: f1_0_ne_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB4_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ne i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 2).
+define void @f1_0_eq_2(ptr %a) {
+; CHECK-LABEL: f1_0_eq_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgh dummy@PLT
+; CHECK-NEXT: .LBB5_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 2).
+define void @f1_0_ne_2(ptr %a) {
+; CHECK-LABEL: f1_0_ne_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB6_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ne i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 3).
+define void @f1_0_eq_3(ptr %a) {
+; CHECK-LABEL: f1_0_eq_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgo dummy@PLT
+; CHECK-NEXT: .LBB7_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 3).
+define void @f1_0_ne_3(ptr %a) {
+; CHECK-LABEL: f1_0_ne_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgno dummy@PLT
+; CHECK-NEXT: .LBB8_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ult i32 %cc, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|1).
+define void @f1_0_01(ptr %a) {
+; CHECK-LABEL: f1_0_01:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB9_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ult i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|2).
+define void @f1_0_02(ptr %a) {
+; CHECK-LABEL: f1_0_02:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jghe dummy@PLT
+; CHECK-NEXT: .LBB10_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|3).
+define void @f1_0_03(ptr %a) {
+; CHECK-LABEL: f1_0_03:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnlh dummy@PLT
+; CHECK-NEXT: .LBB11_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp0 = icmp ne i32 %cc, 0
+ %cmp3 = icmp ne i32 %cc, 3
+ %cmp.inv = and i1 %cmp0, %cmp3
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1|2).
+define void @f1_0_12(ptr %a) {
+; CHECK-LABEL: f1_0_12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB12_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq1 = icmp eq i32 %cc, 1
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %cmp = or i1 %cmpeq1, %cmpeq2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1|3).
+define void @f1_0_13(ptr %a) {
+; CHECK-LABEL: f1_0_13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnhe dummy@PLT
+; CHECK-NEXT: .LBB13_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq1 = icmp eq i32 %cc, 1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp = or i1 %cmpeq1, %cmpeq3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 2|3).
+define void @f1_0_23(ptr %a) {
+; CHECK-LABEL: f1_0_23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnle dummy@PLT
+; CHECK-NEXT: .LBB14_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ugt i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Test-2(f1_1_*/f1_2_*/f1_3_*/f1_4_*).
+; Test mixed patterns involving binary ops.
+
+; Check 'add' for (cc != 0).
+define void @f1_1_1(ptr %a) {
+; CHECK-LABEL: f1_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgne dummy@PLT
+; CHECK-NEXT: .LBB15_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cmp = icmp ult i32 %add, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'add' for (cc == 1|2).
+define void @f1_1_2(ptr %a) {
+; CHECK-LABEL: f1_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB16_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cmp = icmp ult i32 %add, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'add' for (cc == 1|2), with the branch condition inverted.
+define void @f1_1_3(ptr %a) {
+; CHECK-LABEL: f1_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB17_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cmp.inv = icmp ult i32 %add, -2
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define void @f1_2_1(ptr %a) {
+; CHECK-LABEL: f1_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB18_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp.inv = and i1 %cmpne3, %cmpne0
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and' with both operands select_ccmask (cc != 2).
+define void @f1_2_2(ptr %a) {
+; CHECK-LABEL: f1_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB19_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %and.cond.inv = and i1 %ugt1, %cmpne3
+ br i1 %and.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define void @f1_2_3(ptr %a) {
+; CHECK-LABEL: f1_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jghe dummy@PLT
+; CHECK-NEXT: .LBB20_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define void @f1_2_4(ptr %a) {
+; CHECK-LABEL: f1_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnhe dummy@PLT
+; CHECK-NEXT: .LBB21_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define void @f1_2_5(ptr %a) {
+; CHECK-LABEL: f1_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB22_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp = xor i1 %cmpne3, %trunc
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define void @f1_3_1(ptr %a) {
+; CHECK-LABEL: f1_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB23_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp.inv = xor i1 %cmpne3, %xor
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define void @f1_3_2(ptr %a) {
+; CHECK-LABEL: f1_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB24_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp.inv = xor i1 %cmpeq3, %trunc
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define void @f1_3_3(ptr %a) {
+; CHECK-LABEL: f1_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB25_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cmp.cond.inv = xor i1 %cmpne0, %trunc
+ br i1 %cmp.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' where both operands are select_ccmask, one from TM and the other
+; from ICMP (cc == 1).
+define void @f1_4_1(ptr %a) {
+; CHECK-LABEL: f1_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgl dummy@PLT
+; CHECK-NEXT: .LBB26_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp.cond.inv = or i1 %cmpeq3, %cmpeq0
+ br i1 %cmp.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' for (cc == 0|1), with the branch condition inverted.
+define void @f1_4_2(ptr %a) {
+; CHECK-LABEL: f1_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB27_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cmp.inv = icmp samesign ugt i32 %or, -3
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' for (cc == 0|1).
+define void @f1_4_3(ptr %a) {
+; CHECK-LABEL: f1_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB28_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cmp = icmp samesign ult i32 %or, -2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll
new file mode 100644
index 0000000..b9b9a4b
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll
@@ -0,0 +1,1665 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+; Test the implementation of combining select_ccmask for the flag output
+; operand, and of optimizing the ipm sequence using conditional branches.
+
+; Test-1(f2_0_*): Both TrueVal and FalseVal are non-constant (14 valid CCMasks).
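+; Each select lowers to a conditional return of the value already in %r2, with
+; a fall-through copy of the other operand (lgr %r2, %r3) otherwise.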
+
+; Check (cc == 0).
+define i64 @f2_0_eq_0(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ber %r14
+; CHECK-NEXT: .LBB0_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 0).
+define i64 @f2_0_ne_0(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB1_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ugt i32 %cc, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 1).
+define i64 @f2_0_eq_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB2_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 1).
+define i64 @f2_0_ne_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB3_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ne i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 2).
+define i64 @f2_0_eq_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB4_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 2).
+define i64 @f2_0_ne_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnhr %r14
+; CHECK-NEXT: .LBB5_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ne i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 3).
+define i64 @f2_0_eq_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bor %r14
+; CHECK-NEXT: .LBB6_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 3).
+define i64 @f2_0_ne_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnor %r14
+; CHECK-NEXT: .LBB7_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ult i32 %cc, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|1).
+define i64 @f2_0_01(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_01:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB8_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ult i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|2).
+define i64 @f2_0_02(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_02:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB9_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|3).
+define i64 @f2_0_03(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_03:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB10_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cmp0 = icmp ne i32 %cc, 0
+ %cmp3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmp0, %cmp3
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 1|2).
+define i64 @f2_0_12(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB11_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 1|3).
+define i64 @f2_0_13(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB12_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 2|3).
+define i64 @f2_0_23(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB13_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ugt i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Test-2 (f2_1_*/f2_2_*/f2_3_*/f2_4_*).
+; Both TrueVal and FalseVal are non-const with mixed patterns involving
+; Binary Ops.
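+; Note on reading the CHECK lines below (assuming the standard BRC
+; extended-mnemonic encoding e = CC0, l = CC1, h = CC2, o = CC3): each
+; conditional-return mnemonic spells out the CC set being selected, e.g.
+; 'blhr' returns when CC is 1 or 2, 'bher' when CC is 0 or 2, and 'bnhr'
+; whenever CC is not 2.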
+
+; Check 'add' for (cc != 0).
+define i64 @f2_1_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB14_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f2_1_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB15_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the condition inverted.
+define i64 @f2_1_3(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB16_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define i64 @f2_2_1(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB17_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask (cc != 2).
+define i64 @f2_2_2(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB18_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f2_2_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB19_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f2_2_4(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB20_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define i64 @f2_2_5(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB21_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define i64 @f2_3_1(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB22_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define i64 @f2_3_2(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB23_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define i64 @f2_3_3(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB24_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one via TM and one via ICMP (cc == 1).
+define i64 @f2_4_1(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB25_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), with the condition inverted.
+define i64 @f2_4_2(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB26_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f2_4_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB27_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Test-3 (f3_1_*/f3_2_*/f3_3_*/f3_4_*).
+; TrueVal is non-const and FalseVal is const with mixed patterns involving
+; Binary Ops.
+
+; Check 'add' for (cc != 0).
+define i64 @f3_1_1(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB28_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f3_1_2(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB29_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the condition inverted.
+define i64 @f3_1_3(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB30_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define i64 @f3_2_1(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB31_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask (cc != 2).
+define i64 @f3_2_2(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB32_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f3_2_3(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB33_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f3_2_4(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB34_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define i64 @f3_2_5(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB35_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define i64 @f3_3_1(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB36_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define i64 @f3_3_2(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB37_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define i64 @f3_3_3(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB38_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one via TM and one via ICMP (cc == 1).
+define i64 @f3_4_1(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB39_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), with the condition inverted.
+define i64 @f3_4_2(ptr %a, i64 %x) {
+; CHECK-LABEL: f3_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB40_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 5, i64 %x
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f3_4_3(i64 %x, ptr %a) {
+; CHECK-LABEL: f3_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB41_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 %x, i64 5
+ ret i64 %res
+}
+
+; Test-4 (f4_1_*/f4_2_*/f4_3_*/f4_4_*).
+; TrueVal is const and FalseVal is non-const with mixed patterns involving
+; Binary Ops.
+
+; Check 'add' for (cc != 0).
+define i64 @f4_1_1(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB42_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f4_1_2(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB43_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the condition inverted.
+define i64 @f4_1_3(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB44_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define i64 @f4_2_1(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB45_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask (cc != 2).
+define i64 @f4_2_2(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB46_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f4_2_3(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB47_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f4_2_4(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB48_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define i64 @f4_2_5(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB49_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define i64 @f4_3_1(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB50_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define i64 @f4_3_2(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB51_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define i64 @f4_3_3(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB52_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one via TM and one via ICMP (cc == 1).
+define i64 @f4_4_1(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB53_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), with the condition inverted.
+define i64 @f4_4_2(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB54_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f4_4_3(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB55_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Test-5 (f5_1_*/f5_2_*/f5_3_*/f5_4_*).
+; Both TrueVal and FalseVal are const with mixed patterns involving
+; Binary Ops.
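+; With both select values constant, the expected lowering loads one
+; constant into %r2 with 'lghi', returns on the CC-mask branch, and falls
+; through to load the other constant otherwise.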
+
+; Check 'add' for (cc != 0).
+define i64 @f5_1_1(ptr %a) {
+; CHECK-LABEL: f5_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB56_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f5_1_2(ptr %a) {
+; CHECK-LABEL: f5_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB57_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2), with the condition inverted.
+define i64 @f5_1_3(ptr %a) {
+; CHECK-LABEL: f5_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB58_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and the other select_ccmask (cc != 1).
+define i64 @f5_2_1(ptr %a) {
+; CHECK-LABEL: f5_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB59_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask (cc != 2).
+define i64 @f5_2_2(ptr %a) {
+; CHECK-LABEL: f5_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB60_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %ugt1, %cmpne3
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define i64 @f5_2_3(ptr %a) {
+; CHECK-LABEL: f5_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB61_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define i64 @f5_2_4(ptr %a) {
+; CHECK-LABEL: f5_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB62_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'icmp' with one operand 'and' and the other 'select_ccmask' (cc != 1).
+define i64 @f5_2_5(ptr %a) {
+; CHECK-LABEL: f5_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB63_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond = xor i1 %cmpne3, %trunc
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+
+; Check nested 'xor' of cc with select_ccmask (cc != 1).
+define i64 @f5_3_1(ptr %a) {
+; CHECK-LABEL: f5_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB64_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = xor i1 %cmpne3, %xor
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 1).
+define i64 @f5_3_2(ptr %a) {
+; CHECK-LABEL: f5_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB65_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = xor i1 %cmpeq3, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask (cc != 2).
+define i64 @f5_3_3(ptr %a) {
+; CHECK-LABEL: f5_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB66_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask, one via TM and one via ICMP (cc == 1).
+define i64 @f5_4_1(ptr %a) {
+; CHECK-LABEL: f5_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB67_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1), with the condition inverted.
+define i64 @f5_4_2(ptr %a) {
+; CHECK-LABEL: f5_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB68_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 5, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f5_4_3(ptr %a) {
+; CHECK-LABEL: f5_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB69_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 15, i64 5
+ ret i64 %res
+}
+
+; Nested select_ccmask with TrueVal and FalseVal swapped between the two selects.
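+; Given the assume (cc in [0, 4)), the nested selects fold to
+; (cc == 0|2 ? 15 : 5), which is why a single 'bher' suffices.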
+define i64 @f6_1(ptr %a) {
+; CHECK-LABEL: f6_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB70_1: # %entry
+; CHECK-NEXT: lghi %r2, 5
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %select = select i1 %cmpeq3, i64 5, i64 15
+ %res = select i1 %cmpeq0, i64 %select, i64 5
+ ret i64 %res
+}
+
diff --git a/llvm/test/CodeGen/WebAssembly/bulk-memory.ll b/llvm/test/CodeGen/WebAssembly/bulk-memory.ll
index ae170d7..d949068 100644
--- a/llvm/test/CodeGen/WebAssembly/bulk-memory.ll
+++ b/llvm/test/CodeGen/WebAssembly/bulk-memory.ll
@@ -104,6 +104,31 @@ define void @memset_i32(ptr %dest, i8 %val, i32 %len) {
ret void
}
+; CHECK-LABEL: memcpy_0:
+; CHECK-NEXT: .functype memcpy_0 (i32, i32) -> ()
+; CHECK-NEXT: return
+define void @memcpy_0(ptr %dest, ptr %src) {
+ call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 0, i1 0)
+ ret void
+}
+
+; CHECK-LABEL: memmove_0:
+; CHECK-NEXT: .functype memmove_0 (i32, i32) -> ()
+; CHECK-NEXT: return
+define void @memmove_0(ptr %dest, ptr %src) {
+ call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 0, i1 0)
+ ret void
+}
+
+; CHECK-LABEL: memset_0:
+; NO-BULK-MEM-NOT: memory.fill
+; BULK-MEM-NEXT: .functype memset_0 (i32, i32) -> ()
+; BULK-MEM-NEXT: return
+define void @memset_0(ptr %dest, i8 %val) {
+ call void @llvm.memset.p0.i32(ptr %dest, i8 %val, i32 0, i1 0)
+ ret void
+}
+
; CHECK-LABEL: memcpy_1:
; CHECK-NEXT: .functype memcpy_1 (i32, i32) -> ()
; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1)
@@ -137,14 +162,8 @@ define void @memset_1(ptr %dest, i8 %val) {
; CHECK-LABEL: memcpy_1024:
; NO-BULK-MEM-NOT: memory.copy
; BULK-MEM-NEXT: .functype memcpy_1024 (i32, i32) -> ()
-; BULK-MEM-NEXT: block
; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: i32.eqz $push[[L1:[0-9]+]]=, $pop[[L0]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L1]]
-; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L2]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
; BULK-MEM-NEXT: return
define void @memcpy_1024(ptr %dest, ptr %src) {
call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 1024, i1 0)
@@ -154,14 +173,8 @@ define void @memcpy_1024(ptr %dest, ptr %src) {
; CHECK-LABEL: memmove_1024:
; NO-BULK-MEM-NOT: memory.copy
; BULK-MEM-NEXT: .functype memmove_1024 (i32, i32) -> ()
-; BULK-MEM-NEXT: block
; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: i32.eqz $push[[L1:[0-9]+]]=, $pop[[L0]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L1]]
-; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L2]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
; BULK-MEM-NEXT: return
define void @memmove_1024(ptr %dest, ptr %src) {
call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 1024, i1 0)
@@ -171,14 +184,8 @@ define void @memmove_1024(ptr %dest, ptr %src) {
; CHECK-LABEL: memset_1024:
; NO-BULK-MEM-NOT: memory.fill
; BULK-MEM-NEXT: .functype memset_1024 (i32, i32) -> ()
-; BULK-MEM-NEXT: block
; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: i32.eqz $push[[L1:[0-9]+]]=, $pop[[L0]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L1]]
-; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L2]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L0]]
; BULK-MEM-NEXT: return
define void @memset_1024(ptr %dest, i8 %val) {
call void @llvm.memset.p0.i32(ptr %dest, i8 %val, i32 1024, i1 0)
@@ -201,17 +208,11 @@ define void @memset_1024(ptr %dest, i8 %val) {
; BULK-MEM-NEXT: .functype memcpy_alloca_src (i32) -> ()
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112
-; BULK-MEM-NEXT: i32.sub $[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 100
-; BULK-MEM-NEXT: i32.eqz $push[[L4:[0-9]+]]=, $pop[[L3]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L4]]
-; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 12
-; BULK-MEM-NEXT: i32.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]]
-; BULK-MEM-NEXT: i32.const $push[[L7:[0-9]+]]=, 100
-; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L6]], $pop[[L7]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
+; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12
+; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
+; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
+; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]]
; BULK-MEM-NEXT: return
define void @memcpy_alloca_src(ptr %dst) {
%a = alloca [100 x i8]
@@ -224,17 +225,11 @@ define void @memcpy_alloca_src(ptr %dst) {
; BULK-MEM-NEXT: .functype memcpy_alloca_dst (i32) -> ()
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112
-; BULK-MEM-NEXT: i32.sub $[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 100
-; BULK-MEM-NEXT: i32.eqz $push[[L4:[0-9]+]]=, $pop[[L3]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L4]]
-; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 12
-; BULK-MEM-NEXT: i32.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]]
-; BULK-MEM-NEXT: i32.const $push[[L7:[0-9]+]]=, 100
-; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L6]], $0, $pop[[L7]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
+; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12
+; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
+; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
+; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]]
; BULK-MEM-NEXT: return
define void @memcpy_alloca_dst(ptr %src) {
%a = alloca [100 x i8]
@@ -247,17 +242,11 @@ define void @memcpy_alloca_dst(ptr %src) {
; BULK-MEM-NEXT: .functype memset_alloca (i32) -> ()
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112
-; BULK-MEM-NEXT: i32.sub $1=, $pop[[L0]], $pop[[L1]]
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 100
-; BULK-MEM-NEXT: i32.eqz $push[[L3:[0-9]+]]=, $pop[[L2]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L3]]
-; BULK-MEM-NEXT: i32.const $push[[L4:[0-9]+]]=, 12
-; BULK-MEM-NEXT: i32.add $push[[L5:[0-9]+]]=, $1, $pop[[L4]]
-; BULK-MEM-NEXT: i32.const $push[[L6:[0-9]+]]=, 100
-; BULK-MEM-NEXT: memory.fill 0, $pop[[L5]], $0, $pop[[L6]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
+; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12
+; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
+; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
+; BULK-MEM-NEXT: memory.fill 0, $pop[[L4]], $0, $pop[[L5]]
; BULK-MEM-NEXT: return
define void @memset_alloca(i8 %val) {
%a = alloca [100 x i8]
diff --git a/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll b/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
index 0cf8493..d0206a3 100644
--- a/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
+++ b/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
@@ -110,6 +110,31 @@ define void @memset_i32(ptr %dest, i8 %val, i64 %len) {
ret void
}
+; CHECK-LABEL: memcpy_0:
+; CHECK-NEXT: .functype memcpy_0 (i64, i64) -> ()
+; CHECK-NEXT: return
+define void @memcpy_0(ptr %dest, ptr %src) {
+ call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 0, i1 0)
+ ret void
+}
+
+; CHECK-LABEL: memmove_0:
+; CHECK-NEXT: .functype memmove_0 (i64, i64) -> ()
+; CHECK-NEXT: return
+define void @memmove_0(ptr %dest, ptr %src) {
+ call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 0, i1 0)
+ ret void
+}
+
+; CHECK-LABEL: memset_0:
+; NO-BULK-MEM-NOT: memory.fill
+; BULK-MEM-NEXT: .functype memset_0 (i64, i32) -> ()
+; BULK-MEM-NEXT: return
+define void @memset_0(ptr %dest, i8 %val) {
+ call void @llvm.memset.p0.i64(ptr %dest, i8 %val, i64 0, i1 0)
+ ret void
+}
+
; CHECK-LABEL: memcpy_1:
; CHECK-NEXT: .functype memcpy_1 (i64, i64) -> ()
; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1)
@@ -143,14 +168,8 @@ define void @memset_1(ptr %dest, i8 %val) {
; CHECK-LABEL: memcpy_1024:
; NO-BULK-MEM-NOT: memory.copy
; BULK-MEM-NEXT: .functype memcpy_1024 (i64, i64) -> ()
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: i64.eqz $push0=, $pop[[L1]]
-; BULK-MEM-NEXT: br_if 0, $pop0
; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024
; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
; BULK-MEM-NEXT: return
define void @memcpy_1024(ptr %dest, ptr %src) {
call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 1024, i1 0)
@@ -160,14 +179,8 @@ define void @memcpy_1024(ptr %dest, ptr %src) {
; CHECK-LABEL: memmove_1024:
; NO-BULK-MEM-NOT: memory.copy
; BULK-MEM-NEXT: .functype memmove_1024 (i64, i64) -> ()
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: i64.eqz $push0=, $pop[[L1]]
-; BULK-MEM-NEXT: br_if 0, $pop0
; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024
; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
; BULK-MEM-NEXT: return
define void @memmove_1024(ptr %dest, ptr %src) {
call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 1024, i1 0)
@@ -177,14 +190,8 @@ define void @memmove_1024(ptr %dest, ptr %src) {
; CHECK-LABEL: memset_1024:
; NO-BULK-MEM-NOT: memory.fill
; BULK-MEM-NEXT: .functype memset_1024 (i64, i32) -> ()
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 1024
-; BULK-MEM-NEXT: i64.eqz $push0=, $pop[[L1]]
-; BULK-MEM-NEXT: br_if 0, $pop0
; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024
; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L0]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
; BULK-MEM-NEXT: return
define void @memset_1024(ptr %dest, i8 %val) {
call void @llvm.memset.p0.i64(ptr %dest, i8 %val, i64 1024, i1 0)
@@ -207,17 +214,11 @@ define void @memset_1024(ptr %dest, i8 %val) {
; BULK-MEM-NEXT: .functype memcpy_alloca_src (i64) -> ()
; BULK-MEM-NEXT: global.get $push[[L1:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 112
-; BULK-MEM-NEXT: i64.sub $[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]]
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 100
-; BULK-MEM-NEXT: i64.eqz $push[[L4:[0-9]+]]=, $pop[[L3]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L4]]
-; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 12
-; BULK-MEM-NEXT: i64.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]]
-; BULK-MEM-NEXT: i64.const $push[[L7:[0-9]+]]=, 100
-; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L6]], $pop[[L7]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]]
+; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12
+; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
+; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
+; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]]
; BULK-MEM-NEXT: return
define void @memcpy_alloca_src(ptr %dst) {
%a = alloca [100 x i8]
@@ -230,17 +231,11 @@ define void @memcpy_alloca_src(ptr %dst) {
; BULK-MEM-NEXT: .functype memcpy_alloca_dst (i64) -> ()
; BULK-MEM-NEXT: global.get $push[[L1:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 112
-; BULK-MEM-NEXT: i64.sub $[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]]
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 100
-; BULK-MEM-NEXT: i64.eqz $push[[L4:[0-9]+]]=, $pop[[L3]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L4]]
-; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 12
-; BULK-MEM-NEXT: i64.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]]
-; BULK-MEM-NEXT: i64.const $push[[L7:[0-9]+]]=, 100
-; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L6]], $0, $pop[[L7]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]]
+; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12
+; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
+; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
+; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]]
; BULK-MEM-NEXT: return
define void @memcpy_alloca_dst(ptr %src) {
%a = alloca [100 x i8]
@@ -253,17 +248,11 @@ define void @memcpy_alloca_dst(ptr %src) {
; BULK-MEM-NEXT: .functype memset_alloca (i32) -> ()
; BULK-MEM-NEXT: global.get $push[[L1:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 112
-; BULK-MEM-NEXT: i64.sub $1=, $pop[[L1]], $pop[[L0]]
-; BULK-MEM-NEXT: block
-; BULK-MEM-NEXT: i64.const $push[[L2:[0-9]+]]=, 100
-; BULK-MEM-NEXT: i64.eqz $push[[L3:[0-9]+]]=, $pop[[L2]]
-; BULK-MEM-NEXT: br_if 0, $pop[[L3]]
-; BULK-MEM-NEXT: i64.const $push[[L4:[0-9]+]]=, 12
-; BULK-MEM-NEXT: i64.add $push[[L5:[0-9]+]]=, $1, $pop[[L4]]
-; BULK-MEM-NEXT: i64.const $push[[L6:[0-9]+]]=, 100
-; BULK-MEM-NEXT: memory.fill 0, $pop[[L5]], $0, $pop[[L6]]
-; BULK-MEM-NEXT: .LBB{{.*}}:
-; BULK-MEM-NEXT: end_block
+; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]]
+; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12
+; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
+; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
+; BULK-MEM-NEXT: memory.fill 0, $pop[[L4]], $0, $pop[[L5]]
; BULK-MEM-NEXT: return
define void @memset_alloca(i8 %val) {
%a = alloca [100 x i8]
diff --git a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
index 28b4541..7bdc4e1 100644
--- a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
+++ b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
@@ -44,7 +44,7 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; CHECK-NEXT: callq __ubyte_convert_to_ctype
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: js LBB0_6
+; CHECK-NEXT: js LBB0_4
; CHECK-NEXT: ## %bb.1: ## %cond_next.i
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; CHECK-NEXT: movq %rbx, %rdi
@@ -53,84 +53,81 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: sarl $31, %ecx
; CHECK-NEXT: andl %eax, %ecx
; CHECK-NEXT: cmpl $-2, %ecx
-; CHECK-NEXT: je LBB0_10
+; CHECK-NEXT: je LBB0_8
; CHECK-NEXT: ## %bb.2: ## %cond_next.i
; CHECK-NEXT: cmpl $-1, %ecx
-; CHECK-NEXT: jne LBB0_3
-; CHECK-NEXT: LBB0_8: ## %bb4
+; CHECK-NEXT: jne LBB0_6
+; CHECK-NEXT: LBB0_3: ## %bb4
; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %rax
; CHECK-NEXT: movq (%rax), %rax
; CHECK-NEXT: movq 16(%rax), %rax
-; CHECK-NEXT: jmp LBB0_9
-; CHECK-NEXT: LBB0_6: ## %_ubyte_convert2_to_ctypes.exit
+; CHECK-NEXT: jmp LBB0_10
+; CHECK-NEXT: LBB0_4: ## %_ubyte_convert2_to_ctypes.exit
; CHECK-NEXT: cmpl $-2, %eax
-; CHECK-NEXT: je LBB0_10
-; CHECK-NEXT: ## %bb.7: ## %_ubyte_convert2_to_ctypes.exit
-; CHECK-NEXT: cmpl $-1, %eax
; CHECK-NEXT: je LBB0_8
-; CHECK-NEXT: LBB0_3: ## %bb35
+; CHECK-NEXT: ## %bb.5: ## %_ubyte_convert2_to_ctypes.exit
+; CHECK-NEXT: cmpl $-1, %eax
+; CHECK-NEXT: je LBB0_3
+; CHECK-NEXT: LBB0_6: ## %bb35
; CHECK-NEXT: movq _PyUFunc_API@GOTPCREL(%rip), %r14
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: callq *216(%rax)
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %edx
; CHECK-NEXT: testb %dl, %dl
-; CHECK-NEXT: je LBB0_4
-; CHECK-NEXT: ## %bb.12: ## %cond_false.i
-; CHECK-NEXT: setne %dil
+; CHECK-NEXT: je LBB0_11
+; CHECK-NEXT: ## %bb.7: ## %cond_false.i
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
; CHECK-NEXT: movzbl %sil, %ecx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: divb %dl
; CHECK-NEXT: movl %eax, %r15d
; CHECK-NEXT: testb %cl, %cl
-; CHECK-NEXT: setne %al
-; CHECK-NEXT: testb %dil, %al
-; CHECK-NEXT: jne LBB0_5
-; CHECK-NEXT: LBB0_13: ## %cond_true.i200
-; CHECK-NEXT: testb %dl, %dl
-; CHECK-NEXT: jne LBB0_15
-; CHECK-NEXT: ## %bb.14: ## %cond_true14.i
-; CHECK-NEXT: movl $4, %edi
-; CHECK-NEXT: callq _feraiseexcept
-; CHECK-NEXT: LBB0_15: ## %ubyte_ctype_remainder.exit
-; CHECK-NEXT: xorl %ebx, %ebx
-; CHECK-NEXT: jmp LBB0_16
-; CHECK-NEXT: LBB0_10: ## %bb17
+; CHECK-NEXT: jne LBB0_12
+; CHECK-NEXT: jmp LBB0_14
+; CHECK-NEXT: LBB0_8: ## %bb17
; CHECK-NEXT: callq _PyErr_Occurred
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: jne LBB0_23
-; CHECK-NEXT: ## %bb.11: ## %cond_next
+; CHECK-NEXT: jne LBB0_27
+; CHECK-NEXT: ## %bb.9: ## %cond_next
; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %rax
; CHECK-NEXT: movq (%rax), %rax
; CHECK-NEXT: movq 80(%rax), %rax
-; CHECK-NEXT: LBB0_9: ## %bb4
+; CHECK-NEXT: LBB0_10: ## %bb4
; CHECK-NEXT: movq 96(%rax), %rax
; CHECK-NEXT: movq %r14, %rdi
; CHECK-NEXT: movq %rbx, %rsi
; CHECK-NEXT: callq *40(%rax)
-; CHECK-NEXT: jmp LBB0_24
-; CHECK-NEXT: LBB0_4: ## %cond_true.i
+; CHECK-NEXT: jmp LBB0_28
+; CHECK-NEXT: LBB0_11: ## %cond_true.i
; CHECK-NEXT: movl $4, %edi
; CHECK-NEXT: callq _feraiseexcept
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %edx
; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT: xorl %r15d, %r15d
; CHECK-NEXT: testb %sil, %sil
-; CHECK-NEXT: sete %al
+; CHECK-NEXT: je LBB0_14
+; CHECK-NEXT: LBB0_12: ## %cond_false.i
; CHECK-NEXT: testb %dl, %dl
-; CHECK-NEXT: sete %cl
-; CHECK-NEXT: xorl %r15d, %r15d
-; CHECK-NEXT: orb %al, %cl
-; CHECK-NEXT: jne LBB0_13
-; CHECK-NEXT: LBB0_5: ## %cond_next17.i
+; CHECK-NEXT: je LBB0_14
+; CHECK-NEXT: ## %bb.13: ## %cond_next17.i
; CHECK-NEXT: movzbl %sil, %eax
; CHECK-NEXT: divb %dl
; CHECK-NEXT: movzbl %ah, %ebx
-; CHECK-NEXT: LBB0_16: ## %ubyte_ctype_remainder.exit
+; CHECK-NEXT: jmp LBB0_18
+; CHECK-NEXT: LBB0_14: ## %cond_true.i200
+; CHECK-NEXT: testb %dl, %dl
+; CHECK-NEXT: jne LBB0_17
+; CHECK-NEXT: ## %bb.16: ## %cond_true14.i
+; CHECK-NEXT: movl $4, %edi
+; CHECK-NEXT: callq _feraiseexcept
+; CHECK-NEXT: LBB0_17: ## %ubyte_ctype_remainder.exit
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: LBB0_18: ## %ubyte_ctype_remainder.exit
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: callq *224(%rax)
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je LBB0_19
-; CHECK-NEXT: ## %bb.17: ## %cond_true61
+; CHECK-NEXT: je LBB0_21
+; CHECK-NEXT: ## %bb.19: ## %cond_true61
; CHECK-NEXT: movl %eax, %ebp
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: movq _.str5@GOTPCREL(%rip), %rdi
@@ -139,8 +136,8 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
; CHECK-NEXT: callq *200(%rax)
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: js LBB0_23
-; CHECK-NEXT: ## %bb.18: ## %cond_next73
+; CHECK-NEXT: js LBB0_27
+; CHECK-NEXT: ## %bb.20: ## %cond_next73
; CHECK-NEXT: movl $1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq (%r14), %rax
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rsi
@@ -149,13 +146,13 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: movl %ebp, %edx
; CHECK-NEXT: callq *232(%rax)
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: jne LBB0_23
-; CHECK-NEXT: LBB0_19: ## %cond_next89
+; CHECK-NEXT: jne LBB0_27
+; CHECK-NEXT: LBB0_21: ## %cond_next89
; CHECK-NEXT: movl $2, %edi
; CHECK-NEXT: callq _PyTuple_New
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: je LBB0_23
-; CHECK-NEXT: ## %bb.20: ## %cond_next97
+; CHECK-NEXT: je LBB0_27
+; CHECK-NEXT: ## %bb.22: ## %cond_next97
; CHECK-NEXT: movq %rax, %r14
; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %r12
; CHECK-NEXT: movq (%r12), %rax
@@ -163,8 +160,8 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: callq *304(%rdi)
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: je LBB0_21
-; CHECK-NEXT: ## %bb.25: ## %cond_next135
+; CHECK-NEXT: je LBB0_25
+; CHECK-NEXT: ## %bb.23: ## %cond_next135
; CHECK-NEXT: movb %r15b, 16(%rax)
; CHECK-NEXT: movq %rax, 24(%r14)
; CHECK-NEXT: movq (%r12), %rax
@@ -172,22 +169,22 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) {
; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: callq *304(%rdi)
; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: je LBB0_21
-; CHECK-NEXT: ## %bb.26: ## %cond_next182
+; CHECK-NEXT: je LBB0_25
+; CHECK-NEXT: ## %bb.24: ## %cond_next182
; CHECK-NEXT: movb %bl, 16(%rax)
; CHECK-NEXT: movq %rax, 32(%r14)
; CHECK-NEXT: movq %r14, %rax
-; CHECK-NEXT: jmp LBB0_24
-; CHECK-NEXT: LBB0_21: ## %cond_true113
+; CHECK-NEXT: jmp LBB0_28
+; CHECK-NEXT: LBB0_25: ## %cond_true113
; CHECK-NEXT: decq (%r14)
-; CHECK-NEXT: jne LBB0_23
-; CHECK-NEXT: ## %bb.22: ## %cond_true126
+; CHECK-NEXT: jne LBB0_27
+; CHECK-NEXT: ## %bb.26: ## %cond_true126
; CHECK-NEXT: movq 8(%r14), %rax
; CHECK-NEXT: movq %r14, %rdi
; CHECK-NEXT: callq *48(%rax)
-; CHECK-NEXT: LBB0_23: ## %UnifiedReturnBlock
+; CHECK-NEXT: LBB0_27: ## %UnifiedReturnBlock
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: LBB0_24: ## %UnifiedReturnBlock
+; CHECK-NEXT: LBB0_28: ## %UnifiedReturnBlock
; CHECK-NEXT: addq $32, %rsp
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r12
diff --git a/llvm/test/CodeGen/X86/apx/cf.ll b/llvm/test/CodeGen/X86/apx/cf.ll
index b2651e9..af9d944 100644
--- a/llvm/test/CodeGen/X86/apx/cf.ll
+++ b/llvm/test/CodeGen/X86/apx/cf.ll
@@ -230,6 +230,23 @@ entry:
ret void
}
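+; Check that a mask condition built by AND'ing an inverted flag with a signed
+; compare still lowers the masked store to a single conditionally-faulting
+; APX op (CFCMOV) rather than a branch.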
+define void @and_cond(i32 %a, i1 %b) {
+; CHECK-LABEL: and_cond:
+; CHECK: # %bb.0:
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: setg %al
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: testb %al, %sil
+; CHECK-NEXT: cfcmovel %ecx, 0
+; CHECK-NEXT: retq
+ %is_pos = icmp sgt i32 %a, 0
+ %not_b = xor i1 %b, true
+ %cond = and i1 %not_b, %is_pos
+ %mask = insertelement <1 x i1> zeroinitializer, i1 %cond, i64 0
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr null, i32 1, <1 x i1> %mask)
+ ret void
+}
+
define i64 @redundant_test(i64 %num, ptr %p1, i64 %in) {
; CHECK-LABEL: redundant_test:
; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/cpus-intel.ll b/llvm/test/CodeGen/X86/cpus-intel.ll
index 40c38c2..71253c8 100644
--- a/llvm/test/CodeGen/X86/cpus-intel.ll
+++ b/llvm/test/CodeGen/X86/cpus-intel.ll
@@ -38,6 +38,7 @@
; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=lunarlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=gracemont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=pantherlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=wildcatlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=clearwaterforest 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=diamondrapids 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
@@ -104,6 +105,7 @@
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=lunarlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=gracemont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=pantherlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=wildcatlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=clearwaterforest 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=diamondrapids 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
diff --git a/llvm/test/CodeGen/X86/isel-fpclass.ll b/llvm/test/CodeGen/X86/isel-fpclass.ll
index df04b67..c2b7068 100644
--- a/llvm/test/CodeGen/X86/isel-fpclass.ll
+++ b/llvm/test/CodeGen/X86/isel-fpclass.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86
+; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86,X86-SDAGISEL
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefixes=X64,X64-SDAGISEL
; RUN: llc < %s -mtriple=i686-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X86-FASTISEL
; RUN: llc < %s -mtriple=x86_64-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X64,X64-FASTISEL
-; RUN: llc < %s -mtriple=i686-linux -global-isel -global-isel-abort=2 | FileCheck %s -check-prefixes=X86
-; RUN: llc < %s -mtriple=x86_64-linux -global-isel -global-isel-abort=2 | FileCheck %s -check-prefixes=X64,X64-GISEL
+; RUN: llc < %s -mtriple=i686-linux -global-isel -global-isel-abort=1 | FileCheck %s -check-prefixes=X86,X86-GISEL
+; RUN: llc < %s -mtriple=x86_64-linux -global-isel -global-isel-abort=1 | FileCheck %s -check-prefixes=X64-GISEL
define i1 @isnone_f(float %x) nounwind {
; X86-LABEL: isnone_f:
@@ -23,6 +23,11 @@ define i1 @isnone_f(float %x) nounwind {
; X86-FASTISEL-NEXT: fstp %st(0)
; X86-FASTISEL-NEXT: xorl %eax, %eax
; X86-FASTISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: isnone_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: xorl %eax, %eax
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 0)
ret i1 %0
@@ -45,22 +50,27 @@ define i1 @isany_f(float %x) nounwind {
; X86-FASTISEL-NEXT: fstp %st(0)
; X86-FASTISEL-NEXT: movb $1, %al
; X86-FASTISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: isany_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movb $1, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1023)
ret i1 %0
}
define i1 @issignaling_f(float %x) nounwind {
-; X86-LABEL: issignaling_f:
-; X86: # %bb.0:
-; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
-; X86-NEXT: setl %cl
-; X86-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001
-; X86-NEXT: setge %al
-; X86-NEXT: andb %cl, %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: issignaling_f:
+; X86-SDAGISEL: # %bb.0:
+; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-SDAGISEL-NEXT: setl %cl
+; X86-SDAGISEL-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001
+; X86-SDAGISEL-NEXT: setge %al
+; X86-SDAGISEL-NEXT: andb %cl, %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: issignaling_f:
; X64: # %bb.0:
@@ -87,18 +97,44 @@ define i1 @issignaling_f(float %x) nounwind {
; X86-FASTISEL-NEXT: andb %cl, %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: issignaling_f:
+; X86-GISEL: # %bb.0:
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: seta %dl
+; X86-GISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-GISEL-NEXT: setb %al
+; X86-GISEL-NEXT: andb %dl, %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: issignaling_f:
+; X64-GISEL: # %bb.0:
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: seta %dl
+; X64-GISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X64-GISEL-NEXT: setb %al
+; X64-GISEL-NEXT: andb %dl, %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
%a0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1) ; "snan"
ret i1 %a0
}
define i1 @isquiet_f(float %x) nounwind {
-; X86-LABEL: isquiet_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
-; X86-NEXT: setge %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: isquiet_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-SDAGISEL-NEXT: setge %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: isquiet_f:
; X64: # %bb.0: # %entry
@@ -119,19 +155,39 @@ define i1 @issignaling_f(float %x) nounwind {
; X86-FASTISEL-NEXT: setge %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: isquiet_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-GISEL-NEXT: setae %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: isquiet_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X64-GISEL-NEXT: setae %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 2) ; "qnan"
ret i1 %0
}
define i1 @not_isquiet_f(float %x) nounwind {
-; X86-LABEL: not_isquiet_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
-; X86-NEXT: setl %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: not_isquiet_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-SDAGISEL-NEXT: setl %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: not_isquiet_f:
; X64: # %bb.0: # %entry
@@ -152,19 +208,57 @@ define i1 @not_isquiet_f(float %x) nounwind {
; X86-FASTISEL-NEXT: setl %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: not_isquiet_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: setb %dl
+; X86-GISEL-NEXT: orb %cl, %dl
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: sete %cl
+; X86-GISEL-NEXT: orb %dl, %cl
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: seta %dl
+; X86-GISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X86-GISEL-NEXT: setb %al
+; X86-GISEL-NEXT: andb %dl, %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: not_isquiet_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: setb %dl
+; X64-GISEL-NEXT: orb %cl, %dl
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: sete %cl
+; X64-GISEL-NEXT: orb %dl, %cl
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: seta %dl
+; X64-GISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000
+; X64-GISEL-NEXT: setb %al
+; X64-GISEL-NEXT: andb %dl, %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1021) ; ~"qnan"
ret i1 %0
}
define i1 @isinf_f(float %x) nounwind {
-; X86-LABEL: isinf_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-NEXT: sete %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: isinf_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-SDAGISEL-NEXT: sete %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: isinf_f:
; X64: # %bb.0: # %entry
@@ -185,19 +279,39 @@ define i1 @isinf_f(float %x) nounwind {
; X86-FASTISEL-NEXT: sete %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: isinf_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: sete %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: isinf_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: sete %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 516) ; 0x204 = "inf"
ret i1 %0
}
define i1 @not_isinf_f(float %x) nounwind {
-; X86-LABEL: not_isinf_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-NEXT: setne %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: not_isinf_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-SDAGISEL-NEXT: setne %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: not_isinf_f:
; X64: # %bb.0: # %entry
@@ -218,17 +332,43 @@ define i1 @not_isinf_f(float %x) nounwind {
; X86-FASTISEL-NEXT: setne %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: not_isinf_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: setb %dl
+; X86-GISEL-NEXT: orb %cl, %dl
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: seta %al
+; X86-GISEL-NEXT: orb %dl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: not_isinf_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: setb %dl
+; X64-GISEL-NEXT: orb %cl, %dl
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: seta %al
+; X64-GISEL-NEXT: orb %dl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 507) ; ~0x204 = "~inf"
ret i1 %0
}
define i1 @is_plus_inf_f(float %x) nounwind {
-; X86-LABEL: is_plus_inf_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
-; X86-NEXT: sete %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: is_plus_inf_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-SDAGISEL-NEXT: sete %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: is_plus_inf_f:
; X64: # %bb.0: # %entry
@@ -246,17 +386,34 @@ define i1 @is_plus_inf_f(float %x) nounwind {
; X86-FASTISEL-NEXT: sete %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: is_plus_inf_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-GISEL-NEXT: sete %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: is_plus_inf_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: sete %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 512) ; 0x200 = "+inf"
ret i1 %0
}
define i1 @is_minus_inf_f(float %x) nounwind {
-; X86-LABEL: is_minus_inf_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
-; X86-NEXT: sete %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: is_minus_inf_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-SDAGISEL-NEXT: sete %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: is_minus_inf_f:
; X64: # %bb.0: # %entry
@@ -274,17 +431,34 @@ define i1 @is_minus_inf_f(float %x) nounwind {
; X86-FASTISEL-NEXT: sete %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: is_minus_inf_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-GISEL-NEXT: sete %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: is_minus_inf_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: cmpl $-8388608, %eax # imm = 0xFF800000
+; X64-GISEL-NEXT: sete %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 4) ; "-inf"
ret i1 %0
}
define i1 @not_is_minus_inf_f(float %x) nounwind {
-; X86-LABEL: not_is_minus_inf_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
-; X86-NEXT: setne %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: not_is_minus_inf_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000
+; X86-SDAGISEL-NEXT: setne %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: not_is_minus_inf_f:
; X64: # %bb.0: # %entry
@@ -302,19 +476,55 @@ define i1 @not_is_minus_inf_f(float %x) nounwind {
; X86-FASTISEL-NEXT: setne %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: not_is_minus_inf_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: pushl %ebx
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: movl %eax, %ecx
+; X86-GISEL-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %edx, %edx
+; X86-GISEL-NEXT: cmpl $2139095040, %ecx # imm = 0x7F800000
+; X86-GISEL-NEXT: setb %bl
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: sete %ah
+; X86-GISEL-NEXT: orb %dl, %ah
+; X86-GISEL-NEXT: orb %bl, %ah
+; X86-GISEL-NEXT: cmpl $2139095040, %ecx # imm = 0x7F800000
+; X86-GISEL-NEXT: seta %al
+; X86-GISEL-NEXT: orb %ah, %al
+; X86-GISEL-NEXT: popl %ebx
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: not_is_minus_inf_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: movl %eax, %ecx
+; X64-GISEL-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %edx, %edx
+; X64-GISEL-NEXT: cmpl $2139095040, %ecx # imm = 0x7F800000
+; X64-GISEL-NEXT: setb %sil
+; X64-GISEL-NEXT: orb %dl, %sil
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: sete %dl
+; X64-GISEL-NEXT: cmpl $2139095040, %ecx # imm = 0x7F800000
+; X64-GISEL-NEXT: seta %al
+; X64-GISEL-NEXT: orb %dl, %al
+; X64-GISEL-NEXT: orb %sil, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1019) ; ~"-inf"
ret i1 %0
}
define i1 @isfinite_f(float %x) nounwind {
-; X86-LABEL: isfinite_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-NEXT: setl %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: isfinite_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-SDAGISEL-NEXT: setl %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: isfinite_f:
; X64: # %bb.0: # %entry
@@ -335,19 +545,39 @@ define i1 @isfinite_f(float %x) nounwind {
; X86-FASTISEL-NEXT: setl %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: isfinite_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: setb %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: isfinite_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: setb %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504) ; 0x1f8 = "finite"
ret i1 %0
}
define i1 @not_isfinite_f(float %x) nounwind {
-; X86-LABEL: not_isfinite_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
-; X86-NEXT: setge %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: not_isfinite_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-SDAGISEL-NEXT: setge %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: not_isfinite_f:
; X64: # %bb.0: # %entry
@@ -368,17 +598,43 @@ define i1 @not_isfinite_f(float %x) nounwind {
; X86-FASTISEL-NEXT: setge %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: not_isfinite_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: sete %dl
+; X86-GISEL-NEXT: orb %cl, %dl
+; X86-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X86-GISEL-NEXT: seta %al
+; X86-GISEL-NEXT: orb %dl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: not_isfinite_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: sete %dl
+; X64-GISEL-NEXT: orb %cl, %dl
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: seta %al
+; X64-GISEL-NEXT: orb %dl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 519) ; ~0x1f8 = "~finite"
ret i1 %0
}
define i1 @is_plus_finite_f(float %x) nounwind {
-; X86-LABEL: is_plus_finite_f:
-; X86: # %bb.0: # %entry
-; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
-; X86-NEXT: setb %al
-; X86-NEXT: retl
+; X86-SDAGISEL-LABEL: is_plus_finite_f:
+; X86-SDAGISEL: # %bb.0: # %entry
+; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-SDAGISEL-NEXT: setb %al
+; X86-SDAGISEL-NEXT: retl
;
; X64-LABEL: is_plus_finite_f:
; X64: # %bb.0: # %entry
@@ -396,6 +652,23 @@ define i1 @is_plus_finite_f(float %x) nounwind {
; X86-FASTISEL-NEXT: setb %al
; X86-FASTISEL-NEXT: popl %ecx
; X86-FASTISEL-NEXT: retl
+;
+; X86-GISEL-LABEL: is_plus_finite_f:
+; X86-GISEL: # %bb.0: # %entry
+; X86-GISEL-NEXT: xorl %ecx, %ecx
+; X86-GISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000
+; X86-GISEL-NEXT: setb %al
+; X86-GISEL-NEXT: orb %cl, %al
+; X86-GISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: is_plus_finite_f:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: xorl %ecx, %ecx
+; X64-GISEL-NEXT: movd %xmm0, %eax
+; X64-GISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000
+; X64-GISEL-NEXT: setb %al
+; X64-GISEL-NEXT: orb %cl, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 448) ; 0x1c0 = "+finite"
ret i1 %0
@@ -418,6 +691,11 @@ define i1 @isnone_d(double %x) nounwind {
; X86-FASTISEL-NEXT: fstp %st(0)
; X86-FASTISEL-NEXT: xorl %eax, %eax
; X86-FASTISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: isnone_d:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: xorl %eax, %eax
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 0)
ret i1 %0
@@ -440,6 +718,11 @@ define i1 @isany_d(double %x) nounwind {
; X86-FASTISEL-NEXT: fstp %st(0)
; X86-FASTISEL-NEXT: movb $1, %al
; X86-FASTISEL-NEXT: retl
+;
+; X64-GISEL-LABEL: isany_d:
+; X64-GISEL: # %bb.0: # %entry
+; X64-GISEL-NEXT: movb $1, %al
+; X64-GISEL-NEXT: retq
entry:
%0 = tail call i1 @llvm.is.fpclass.f64(double %x, i32 1023)
ret i1 %0
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 4cde581..caec02e 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4765,6 +4765,66 @@ define void @scaleidx_scatter_outofrange(<8 x float> %value, ptr %base, <8 x i32
}
declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32 immarg, <8 x i1>)
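+; PR163023: gathers whose addresses are a splatted scalar base plus
+; sign/zero-extended vector offsets. Sign-extended i32 offsets fold into the
+; dword-indexed form (vpgatherdd sign-extends its indices); zero-extended
+; offsets need qword indices on 64-bit targets, but not on 32-bit targets,
+; where the extension is irrelevant to 32-bit addressing.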
+define <16 x i32> @pr163023_sext(ptr %a0, <16 x i32> %a1) {
+; X64-LABEL: pr163023_sext:
+; X64: # %bb.0:
+; X64-NEXT: kxnorw %k0, %k0, %k1
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpgatherdd (%rdi,%zmm0), %zmm1 {%k1}
+; X64-NEXT: vmovdqa64 %zmm1, %zmm0
+; X64-NEXT: retq
+;
+; X86-LABEL: pr163023_sext:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: kxnorw %k0, %k0, %k1
+; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-NEXT: vpgatherdd (%eax,%zmm0), %zmm1 {%k1}
+; X86-NEXT: vmovdqa64 %zmm1, %zmm0
+; X86-NEXT: retl
+ %addr.p = ptrtoint ptr %a0 to i64
+ %addr.v = insertelement <1 x i64> poison, i64 %addr.p, i64 0
+ %addr.splat = shufflevector <1 x i64> %addr.v, <1 x i64> poison, <16 x i32> zeroinitializer
+ %ofs = sext <16 x i32> %a1 to <16 x i64>
+ %addr = add nuw <16 x i64> %addr.splat, %ofs
+ %ptr = inttoptr <16 x i64> %addr to <16 x ptr>
+ %gather = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> %ptr, i32 4, <16 x i1> splat (i1 true), <16 x i32> poison)
+ ret <16 x i32> %gather
+}
+
+define <16 x i32> @pr163023_zext(ptr %a0, <16 x i32> %a1) {
+; X64-LABEL: pr163023_zext:
+; X64: # %bb.0:
+; X64-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; X64-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; X64-NEXT: kxnorw %k0, %k0, %k1
+; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; X64-NEXT: kxnorw %k0, %k0, %k2
+; X64-NEXT: vpgatherqd (%rdi,%zmm0), %ymm3 {%k2}
+; X64-NEXT: vpgatherqd (%rdi,%zmm1), %ymm2 {%k1}
+; X64-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0
+; X64-NEXT: retq
+;
+; X86-LABEL: pr163023_zext:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: kxnorw %k0, %k0, %k1
+; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-NEXT: vpgatherdd (%eax,%zmm0), %zmm1 {%k1}
+; X86-NEXT: vmovdqa64 %zmm1, %zmm0
+; X86-NEXT: retl
+ %addr.p = ptrtoint ptr %a0 to i64
+ %addr.v = insertelement <1 x i64> poison, i64 %addr.p, i64 0
+ %addr.splat = shufflevector <1 x i64> %addr.v, <1 x i64> poison, <16 x i32> zeroinitializer
+ %ofs = zext <16 x i32> %a1 to <16 x i64>
+ %addr = add nuw <16 x i64> %addr.splat, %ofs
+ %ptr = inttoptr <16 x i64> %addr to <16 x ptr>
+ %gather = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> %ptr, i32 4, <16 x i1> splat (i1 true), <16 x i32> poison)
+ ret <16 x i32> %gather
+}
+
;
; PR45906
; This used to cause fast-isel to generate bad copy instructions that would
diff --git a/llvm/test/CodeGen/X86/pr160612.ll b/llvm/test/CodeGen/X86/pr160612.ll
new file mode 100644
index 0000000..6572c42
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr160612.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -O2 | FileCheck %s
+
+; Test for issue #160612: OR'ed branch conditions should be lowered to
+; multiple conditional branches instead of materializing booleans with SETCC
+; when no special optimization applies.
+
+declare void @subroutine_foo()
+declare void @subroutine_bar()
+
+; Original issue: (x == 0 || y == 0) was generating SETCC + TEST + BRANCH
+; instead of using two conditional branches directly.
+define void @func_a(i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: func_a:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: je subroutine_foo@PLT # TAILCALL
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: jne subroutine_bar@PLT # TAILCALL
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: jmp subroutine_foo@PLT # TAILCALL
+entry:
+ %cmp = icmp eq i32 %x, 0
+ %cmp1 = icmp eq i32 %y, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.else
+
+if.then:
+ tail call void @subroutine_foo()
+ br label %if.end
+
+if.else:
+ tail call void @subroutine_bar()
+ br label %if.end
+
+if.end:
+ ret void
+}
+
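+; A rough C-level sketch of the pattern above (illustrative only; the exact
+; source from the issue report is not reproduced here):
+;
+;   void func_a(int x, int y) {
+;     if (x == 0 || y == 0)
+;       subroutine_foo();
+;     else
+;       subroutine_bar();
+;   }
+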
+; Reference implementation that already generated optimal code.
+; This should continue to generate the same optimal code.
+define void @func_b(i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: func_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: je subroutine_foo@PLT # TAILCALL
+; CHECK-NEXT: # %bb.1: # %if.else
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: je subroutine_foo@PLT # TAILCALL
+; CHECK-NEXT: # %bb.2: # %if.else3
+; CHECK-NEXT: jmp subroutine_bar@PLT # TAILCALL
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ tail call void @subroutine_foo()
+ br label %if.end4
+
+if.else:
+ %cmp1 = icmp eq i32 %y, 0
+ br i1 %cmp1, label %if.then2, label %if.else3
+
+if.then2:
+ tail call void @subroutine_foo()
+ br label %if.end4
+
+if.else3:
+ tail call void @subroutine_bar()
+ br label %if.end4
+
+if.end4:
+ ret void
+}
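+
+; func_b spells out the same logic with explicit nesting, roughly:
+;
+;   void func_b(int x, int y) {
+;     if (x == 0)
+;       subroutine_foo();
+;     else if (y == 0)
+;       subroutine_foo();
+;     else
+;       subroutine_bar();
+;   }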
diff --git a/llvm/test/CodeGen/X86/setcc-wide-types.ll b/llvm/test/CodeGen/X86/setcc-wide-types.ll
index 69abf6e..d018c53 100644
--- a/llvm/test/CodeGen/X86/setcc-wide-types.ll
+++ b/llvm/test/CodeGen/X86/setcc-wide-types.ll
@@ -1493,15 +1493,23 @@ define i1 @allbits_i128_load_arg(ptr %w) {
}
define i1 @anybits_i256_load_arg(ptr %w) {
-; ANY-LABEL: anybits_i256_load_arg:
-; ANY: # %bb.0:
-; ANY-NEXT: movq (%rdi), %rax
-; ANY-NEXT: movq 8(%rdi), %rcx
-; ANY-NEXT: orq 24(%rdi), %rcx
-; ANY-NEXT: orq 16(%rdi), %rax
-; ANY-NEXT: orq %rcx, %rax
-; ANY-NEXT: setne %al
-; ANY-NEXT: retq
+; SSE-LABEL: anybits_i256_load_arg:
+; SSE: # %bb.0:
+; SSE-NEXT: movq (%rdi), %rax
+; SSE-NEXT: movq 8(%rdi), %rcx
+; SSE-NEXT: orq 24(%rdi), %rcx
+; SSE-NEXT: orq 16(%rdi), %rax
+; SSE-NEXT: orq %rcx, %rax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVXANY-LABEL: anybits_i256_load_arg:
+; AVXANY: # %bb.0:
+; AVXANY-NEXT: vmovdqu (%rdi), %ymm0
+; AVXANY-NEXT: vptest %ymm0, %ymm0
+; AVXANY-NEXT: setne %al
+; AVXANY-NEXT: vzeroupper
+; AVXANY-NEXT: retq
%ld = load i256, ptr %w
%cmp = icmp ne i256 %ld, 0
ret i1 %cmp
@@ -1552,21 +1560,30 @@ define i1 @allbits_i256_load_arg(ptr %w) {
}
define i1 @anybits_i512_load_arg(ptr %w) {
-; ANY-LABEL: anybits_i512_load_arg:
-; ANY: # %bb.0:
-; ANY-NEXT: movq 16(%rdi), %rax
-; ANY-NEXT: movq (%rdi), %rcx
-; ANY-NEXT: movq 8(%rdi), %rdx
-; ANY-NEXT: movq 24(%rdi), %rsi
-; ANY-NEXT: orq 56(%rdi), %rsi
-; ANY-NEXT: orq 40(%rdi), %rdx
-; ANY-NEXT: orq %rsi, %rdx
-; ANY-NEXT: orq 48(%rdi), %rax
-; ANY-NEXT: orq 32(%rdi), %rcx
-; ANY-NEXT: orq %rax, %rcx
-; ANY-NEXT: orq %rdx, %rcx
-; ANY-NEXT: setne %al
-; ANY-NEXT: retq
+; NO512-LABEL: anybits_i512_load_arg:
+; NO512: # %bb.0:
+; NO512-NEXT: movq 16(%rdi), %rax
+; NO512-NEXT: movq (%rdi), %rcx
+; NO512-NEXT: movq 8(%rdi), %rdx
+; NO512-NEXT: movq 24(%rdi), %rsi
+; NO512-NEXT: orq 56(%rdi), %rsi
+; NO512-NEXT: orq 40(%rdi), %rdx
+; NO512-NEXT: orq %rsi, %rdx
+; NO512-NEXT: orq 48(%rdi), %rax
+; NO512-NEXT: orq 32(%rdi), %rcx
+; NO512-NEXT: orq %rax, %rcx
+; NO512-NEXT: orq %rdx, %rcx
+; NO512-NEXT: setne %al
+; NO512-NEXT: retq
+;
+; AVX512-LABEL: anybits_i512_load_arg:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
+; AVX512-NEXT: setne %al
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%ld = load i512, ptr %w
%cmp = icmp ne i512 %ld, 0
ret i1 %cmp
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
index 42617c1..18588aa 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -24,7 +24,7 @@ define float @sqrt_ieee_ninf(float %f) #0 {
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
+ ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = ninf afn VRSQRTSSr killed [[DEF]], [[COPY]]
; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
@@ -71,7 +71,7 @@ define float @sqrt_daz_ninf(float %f) #1 {
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
+ ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = ninf afn VRSQRTSSr killed [[DEF]], [[COPY]]
; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
index b2064b1..02d4d88 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
@@ -181,40 +181,38 @@ define zeroext i1 @segmentedStack(ptr readonly %vk1, ptr readonly %vk2, i64 %key
; CHECK-LABEL: segmentedStack:
; CHECK: ## %bb.0:
; CHECK-NEXT: cmpq %gs:816, %rsp
-; CHECK-NEXT: jbe LBB3_6
+; CHECK-NEXT: jbe LBB3_7
; CHECK-NEXT: LBB3_1: ## %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: testq %rdi, %rdi
-; CHECK-NEXT: sete %al
-; CHECK-NEXT: testq %rsi, %rsi
-; CHECK-NEXT: sete %cl
-; CHECK-NEXT: orb %al, %cl
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: sete %al
-; CHECK-NEXT: testb %cl, %cl
-; CHECK-NEXT: jne LBB3_4
-; CHECK-NEXT: ## %bb.2: ## %if.end4.i
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: je LBB3_5
+; CHECK-NEXT: ## %bb.2: ## %entry
+; CHECK-NEXT: testq %rsi, %rsi
+; CHECK-NEXT: je LBB3_5
+; CHECK-NEXT: ## %bb.3: ## %if.end4.i
; CHECK-NEXT: movq 8(%rdi), %rdx
; CHECK-NEXT: cmpq 8(%rsi), %rdx
-; CHECK-NEXT: jne LBB3_5
-; CHECK-NEXT: ## %bb.3: ## %land.rhs.i.i
+; CHECK-NEXT: jne LBB3_6
+; CHECK-NEXT: ## %bb.4: ## %land.rhs.i.i
; CHECK-NEXT: movq (%rsi), %rsi
; CHECK-NEXT: movq (%rdi), %rdi
; CHECK-NEXT: callq _memcmp
; CHECK-NEXT: testl %eax, %eax
; CHECK-NEXT: sete %al
-; CHECK-NEXT: LBB3_4: ## %__go_ptr_strings_equal.exit
+; CHECK-NEXT: LBB3_5: ## %__go_ptr_strings_equal.exit
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
-; CHECK-NEXT: LBB3_5:
+; CHECK-NEXT: LBB3_6:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: retq
-; CHECK-NEXT: LBB3_6:
+; CHECK-NEXT: LBB3_7:
; CHECK-NEXT: movl $8, %r10d
; CHECK-NEXT: movl $0, %r11d
; CHECK-NEXT: callq ___morestack
@@ -224,43 +222,41 @@ define zeroext i1 @segmentedStack(ptr readonly %vk1, ptr readonly %vk2, i64 %key
; NOCOMPACTUNWIND-LABEL: segmentedStack:
; NOCOMPACTUNWIND: # %bb.0:
; NOCOMPACTUNWIND-NEXT: cmpq %fs:112, %rsp
-; NOCOMPACTUNWIND-NEXT: jbe .LBB3_6
+; NOCOMPACTUNWIND-NEXT: jbe .LBB3_7
; NOCOMPACTUNWIND-NEXT: .LBB3_1: # %entry
; NOCOMPACTUNWIND-NEXT: pushq %rax
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
-; NOCOMPACTUNWIND-NEXT: testq %rdi, %rdi
-; NOCOMPACTUNWIND-NEXT: sete %al
-; NOCOMPACTUNWIND-NEXT: testq %rsi, %rsi
-; NOCOMPACTUNWIND-NEXT: sete %cl
-; NOCOMPACTUNWIND-NEXT: orb %al, %cl
; NOCOMPACTUNWIND-NEXT: movq %rdi, %rax
; NOCOMPACTUNWIND-NEXT: orq %rsi, %rax
; NOCOMPACTUNWIND-NEXT: sete %al
-; NOCOMPACTUNWIND-NEXT: testb %cl, %cl
-; NOCOMPACTUNWIND-NEXT: jne .LBB3_4
-; NOCOMPACTUNWIND-NEXT: # %bb.2: # %if.end4.i
+; NOCOMPACTUNWIND-NEXT: testq %rdi, %rdi
+; NOCOMPACTUNWIND-NEXT: je .LBB3_5
+; NOCOMPACTUNWIND-NEXT: # %bb.2: # %entry
+; NOCOMPACTUNWIND-NEXT: testq %rsi, %rsi
+; NOCOMPACTUNWIND-NEXT: je .LBB3_5
+; NOCOMPACTUNWIND-NEXT: # %bb.3: # %if.end4.i
; NOCOMPACTUNWIND-NEXT: movq 8(%rdi), %rdx
; NOCOMPACTUNWIND-NEXT: cmpq 8(%rsi), %rdx
-; NOCOMPACTUNWIND-NEXT: jne .LBB3_5
-; NOCOMPACTUNWIND-NEXT: # %bb.3: # %land.rhs.i.i
+; NOCOMPACTUNWIND-NEXT: jne .LBB3_6
+; NOCOMPACTUNWIND-NEXT: # %bb.4: # %land.rhs.i.i
; NOCOMPACTUNWIND-NEXT: movq (%rsi), %rsi
; NOCOMPACTUNWIND-NEXT: movq (%rdi), %rdi
; NOCOMPACTUNWIND-NEXT: callq memcmp@PLT
; NOCOMPACTUNWIND-NEXT: testl %eax, %eax
; NOCOMPACTUNWIND-NEXT: sete %al
-; NOCOMPACTUNWIND-NEXT: .LBB3_4: # %__go_ptr_strings_equal.exit
+; NOCOMPACTUNWIND-NEXT: .LBB3_5: # %__go_ptr_strings_equal.exit
; NOCOMPACTUNWIND-NEXT: # kill: def $al killed $al killed $eax
; NOCOMPACTUNWIND-NEXT: popq %rcx
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8
; NOCOMPACTUNWIND-NEXT: retq
-; NOCOMPACTUNWIND-NEXT: .LBB3_5:
+; NOCOMPACTUNWIND-NEXT: .LBB3_6:
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
; NOCOMPACTUNWIND-NEXT: xorl %eax, %eax
; NOCOMPACTUNWIND-NEXT: # kill: def $al killed $al killed $eax
; NOCOMPACTUNWIND-NEXT: popq %rcx
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8
; NOCOMPACTUNWIND-NEXT: retq
-; NOCOMPACTUNWIND-NEXT: .LBB3_6:
+; NOCOMPACTUNWIND-NEXT: .LBB3_7:
; NOCOMPACTUNWIND-NEXT: movl $8, %r10d
; NOCOMPACTUNWIND-NEXT: movl $0, %r11d
; NOCOMPACTUNWIND-NEXT: callq __morestack