Diffstat (limited to 'llvm/test')
23 files changed, 1269 insertions, 1421 deletions
diff --git a/llvm/test/CodeGen/AArch64/seh-extended-spills.ll b/llvm/test/CodeGen/AArch64/seh-extended-spills.ll new file mode 100644 index 0000000..ecc2270 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/seh-extended-spills.ll @@ -0,0 +1,34 @@ +; RUN: llc -mtriple aarch64-unknown-windows-msvc -filetype asm -o - %s | FileCheck %s + +declare dso_local void @g(ptr noundef) +define dso_local preserve_mostcc void @f(ptr noundef %p) #0 { +entry: + %p.addr = alloca ptr, align 8 + store ptr %p, ptr %p.addr, align 8 + %0 = load ptr, ptr %p.addr, align 8 + call void @g(ptr noundef %0) + ret void +} + +attributes #0 = { nounwind uwtable(sync) } + +; CHECK: stp x9, x10, [sp, #[[OFFSET_0:[0-9]+]]] +; CHECK-NEXT: .seh_save_any_reg_p x9, [[OFFSET_0]] +; CHECK: stp x11, x12, [sp, #[[OFFSET_1:[0-9]+]]] +; CHECK-NEXT: .seh_save_any_reg_p x11, [[OFFSET_1]] +; CHECK: stp x13, x14, [sp, #[[OFFSET_2:[0-9]+]]] +; CHECK-NEXT: .seh_save_any_reg_p x13, [[OFFSET_2]] +; CHECK: str x15, [sp, #[[OFFSET_3:[0-9]+]]] +; CHECK-NEXT: .seh_save_any_reg x15, [[OFFSET_3]] +; CHECK: .seh_endprologue + +; CHECK: .seh_startepilogue +; CHECK: ldr x15, [sp, #[[OFFSET_3]]] +; CHECK-NEXT: .seh_save_any_reg x15, [[OFFSET_3]] +; CHECK: ldp x13, x14, [sp, #[[OFFSET_2]]] +; CHECK-NEXT: .seh_save_any_reg_p x13, [[OFFSET_2]] +; CHECK: ldp x11, x12, [sp, #[[OFFSET_1]]] +; CHECK-NEXT: .seh_save_any_reg_p x11, [[OFFSET_1]] +; CHECK: ldp x9, x10, [sp, #[[OFFSET_0]]] +; CHECK-NEXT: .seh_save_any_reg_p x9, [[OFFSET_0]] +; CHECK: .seh_endepilogue diff --git a/llvm/test/CodeGen/RISCV/machine-outliner-cfi.mir b/llvm/test/CodeGen/RISCV/machine-outliner-cfi.mir index 2acb1d4..78d242b 100644 --- a/llvm/test/CodeGen/RISCV/machine-outliner-cfi.mir +++ b/llvm/test/CodeGen/RISCV/machine-outliner-cfi.mir @@ -3,27 +3,33 @@ # RUN: llc -mtriple=riscv64 -x mir -run-pass=machine-outliner -simplify-mir -verify-machineinstrs < %s \ # RUN: | FileCheck -check-prefixes=OUTLINED,RV64I-MO %s -# CFIs are invisible (they can be outlined, but won't actually impact the outlining result) if there -# is no need to unwind. CFIs will be stripped when we build outlined functions. +# Combined tests for outlining with CFI instructions on RISC-V: +# 1) All CFIs present in candidate: outline as tail-call and keep CFIs. +# 2) Partial CFIs in function (extra outside candidate): do not outline. +# 3) CFIs present but candidate is not a tail-call: do not outline. --- | - define void @func1(i32 %a, i32 %b) nounwind { ret void } - - define void @func2(i32 %a, i32 %b) nounwind { ret void } - - define void @func3(i32 %a, i32 %b) nounwind { ret void } + define void @funcA(i32 %a, i32 %b) nounwind { ret void } + define void @funcB(i32 %a, i32 %b) nounwind { ret void } + define void @funcC(i32 %a, i32 %b) nounwind { ret void } + define void @funcD(i32 %a, i32 %b) nounwind { ret void } + define void @funcE(i32 %a, i32 %b) nounwind { ret void } + define void @funcF(i32 %a, i32 %b) nounwind { ret void } ... + +# Case 1: All CFIs present; expect outlining and CFIs retained in outlined body. 
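Before the MIR itself, a minimal IR-level sketch of the trigger pattern may help (hypothetical, not part of this patch): the outliner fires when two or more functions lower to an identical instruction sequence. CFI placement can only be pinned down at the MIR level (which is why the tests below are written against MIR), but compiling something like the following with llc -mtriple=riscv64 -enable-machine-outliner can already produce a PseudoTAIL to an OUTLINED_FUNCTION, analogous to funcA/funcB:

define i32 @left(i32 %a, i32 %b) minsize {
  ; Same arithmetic shape as the candidate sequence in funcA-funcC.
  %or.a = or i32 %a, 1023
  %or.b = or i32 %b, 1023
  %add = add i32 %or.a, 17
  %and = and i32 %add, %or.b
  %sub = sub i32 %or.a, %and
  ret i32 %sub
}

define i32 @right(i32 %a, i32 %b) minsize {
  ; Identical body: the outliner needs at least two occurrences to pay off.
  %or.a = or i32 %a, 1023
  %or.b = or i32 %b, 1023
  %add = add i32 %or.a, 17
  %and = and i32 %add, %or.b
  %sub = sub i32 %or.a, %and
  ret i32 %sub
}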
--- -name: func1 +name: funcA tracksRegLiveness: true body: | bb.0: liveins: $x10, $x11 - ; RV32I-MO-LABEL: name: func1 + ; RV32I-MO-LABEL: name: funcA ; RV32I-MO: liveins: $x10, $x11 ; RV32I-MO-NEXT: {{ $}} ; RV32I-MO-NEXT: PseudoTAIL target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit $x2, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x2, implicit $x10, implicit $x11 - ; RV64I-MO-LABEL: name: func1 + ; + ; RV64I-MO-LABEL: name: funcA ; RV64I-MO: liveins: $x10, $x11 ; RV64I-MO-NEXT: {{ $}} ; RV64I-MO-NEXT: PseudoTAIL target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit $x2, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x2, implicit $x10, implicit $x11 @@ -39,62 +45,213 @@ body: | PseudoRET ... --- -name: func2 +name: funcB tracksRegLiveness: true body: | bb.0: liveins: $x10, $x11 - ; RV32I-MO-LABEL: name: func2 + ; RV32I-MO-LABEL: name: funcB ; RV32I-MO: liveins: $x10, $x11 ; RV32I-MO-NEXT: {{ $}} ; RV32I-MO-NEXT: PseudoTAIL target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit $x2, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x2, implicit $x10, implicit $x11 - ; RV64I-MO-LABEL: name: func2 + ; + ; RV64I-MO-LABEL: name: funcB ; RV64I-MO: liveins: $x10, $x11 ; RV64I-MO-NEXT: {{ $}} ; RV64I-MO-NEXT: PseudoTAIL target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit $x2, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x2, implicit $x10, implicit $x11 $x10 = ORI $x10, 1023 CFI_INSTRUCTION offset $x1, 0 $x11 = ORI $x11, 1023 - CFI_INSTRUCTION offset $x1, -8 - $x12 = ADDI $x10, 17 CFI_INSTRUCTION offset $x1, -4 + $x12 = ADDI $x10, 17 + CFI_INSTRUCTION offset $x1, -8 $x11 = AND $x12, $x11 CFI_INSTRUCTION offset $x1, -12 $x10 = SUB $x10, $x11 PseudoRET ... + +# Case 2: Partial CFIs (extra CFI outside candidate in funcD); expect no outlining. --- -name: func3 +name: funcC tracksRegLiveness: true body: | bb.0: liveins: $x10, $x11 - ; RV32I-MO-LABEL: name: func3 + ; RV32I-MO-LABEL: name: funcC ; RV32I-MO: liveins: $x10, $x11 ; RV32I-MO-NEXT: {{ $}} ; RV32I-MO-NEXT: PseudoTAIL target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit $x2, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x2, implicit $x10, implicit $x11 - ; RV64I-MO-LABEL: name: func3 + ; + ; RV64I-MO-LABEL: name: funcC ; RV64I-MO: liveins: $x10, $x11 ; RV64I-MO-NEXT: {{ $}} ; RV64I-MO-NEXT: PseudoTAIL target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit $x2, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x2, implicit $x10, implicit $x11 $x10 = ORI $x10, 1023 - CFI_INSTRUCTION offset $x1, -12 + CFI_INSTRUCTION offset $x1, 0 $x11 = ORI $x11, 1023 + CFI_INSTRUCTION offset $x1, -4 + $x12 = ADDI $x10, 17 CFI_INSTRUCTION offset $x1, -8 + $x11 = AND $x12, $x11 + CFI_INSTRUCTION offset $x1, -12 + $x10 = SUB $x10, $x11 + PseudoRET +... 
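# Note on case 2: funcC above is still outlined (its checks expect a
# PseudoTAIL) because every CFI_INSTRUCTION in its body lies inside the
# repeated sequence, so the candidate carries a complete unwind
# description. funcD below is the "partial" half of the case: it
# prepends one extra directive ahead of the common window,
#
#   CFI_INSTRUCTION offset $x1, -16
#
# and with that CFI left outside any candidate, outlining could no
# longer preserve funcD's unwind state, so funcD is expected to keep
# its full body.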
+--- +name: funcD +tracksRegLiveness: true +body: | + bb.0: + liveins: $x10, $x11 + ; RV32I-MO-LABEL: name: funcD + ; RV32I-MO: liveins: $x10, $x11 + ; RV32I-MO-NEXT: {{ $}} + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -16 + ; RV32I-MO-NEXT: $x10 = ORI $x10, 1023 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, 0 + ; RV32I-MO-NEXT: $x11 = ORI $x11, 1023 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -4 + ; RV32I-MO-NEXT: $x12 = ADDI $x10, 17 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -8 + ; RV32I-MO-NEXT: $x11 = AND $x12, $x11 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -12 + ; RV32I-MO-NEXT: $x10 = SUB $x10, $x11 + ; RV32I-MO-NEXT: PseudoRET + ; + ; RV64I-MO-LABEL: name: funcD + ; RV64I-MO: liveins: $x10, $x11 + ; RV64I-MO-NEXT: {{ $}} + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -16 + ; RV64I-MO-NEXT: $x10 = ORI $x10, 1023 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, 0 + ; RV64I-MO-NEXT: $x11 = ORI $x11, 1023 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -4 + ; RV64I-MO-NEXT: $x12 = ADDI $x10, 17 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -8 + ; RV64I-MO-NEXT: $x11 = AND $x12, $x11 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -12 + ; RV64I-MO-NEXT: $x10 = SUB $x10, $x11 + ; RV64I-MO-NEXT: PseudoRET + CFI_INSTRUCTION offset $x1, -16 + $x10 = ORI $x10, 1023 + CFI_INSTRUCTION offset $x1, 0 + $x11 = ORI $x11, 1023 + CFI_INSTRUCTION offset $x1, -4 $x12 = ADDI $x10, 17 + CFI_INSTRUCTION offset $x1, -8 + $x11 = AND $x12, $x11 + CFI_INSTRUCTION offset $x1, -12 + $x10 = SUB $x10, $x11 + PseudoRET +... + +# Case 3: CFIs present but candidate is not a tail-call; expect no outlining. +--- +name: funcE +tracksRegLiveness: true +body: | + bb.0: + liveins: $x10, $x11 + ; RV32I-MO-LABEL: name: funcE + ; RV32I-MO: liveins: $x10, $x11 + ; RV32I-MO-NEXT: {{ $}} + ; RV32I-MO-NEXT: $x10 = ORI $x10, 1023 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, 0 + ; RV32I-MO-NEXT: $x11 = ORI $x11, 1023 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -4 + ; RV32I-MO-NEXT: $x12 = ADDI $x10, 17 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -8 + ; RV32I-MO-NEXT: $x11 = AND $x12, $x11 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -12 + ; RV32I-MO-NEXT: $x10 = SUB $x10, $x11 + ; RV32I-MO-NEXT: $x10 = ADDI $x10, 1 + ; RV32I-MO-NEXT: PseudoRET + ; + ; RV64I-MO-LABEL: name: funcE + ; RV64I-MO: liveins: $x10, $x11 + ; RV64I-MO-NEXT: {{ $}} + ; RV64I-MO-NEXT: $x10 = ORI $x10, 1023 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, 0 + ; RV64I-MO-NEXT: $x11 = ORI $x11, 1023 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -4 + ; RV64I-MO-NEXT: $x12 = ADDI $x10, 17 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -8 + ; RV64I-MO-NEXT: $x11 = AND $x12, $x11 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -12 + ; RV64I-MO-NEXT: $x10 = SUB $x10, $x11 + ; RV64I-MO-NEXT: $x10 = ADDI $x10, 1 + ; RV64I-MO-NEXT: PseudoRET + $x10 = ORI $x10, 1023 + CFI_INSTRUCTION offset $x1, 0 + $x11 = ORI $x11, 1023 CFI_INSTRUCTION offset $x1, -4 + $x12 = ADDI $x10, 17 + CFI_INSTRUCTION offset $x1, -8 $x11 = AND $x12, $x11 + CFI_INSTRUCTION offset $x1, -12 + $x10 = SUB $x10, $x11 + $x10 = ADDI $x10, 1 + PseudoRET +... 
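# Note on case 3: funcE above and funcF below share the same
# CFI-annotated arithmetic but end in different instructions
# ($x10 = ADDI $x10, 1 versus $x10 = ADDI $x10, 2), so the common
# sequence stops short of the return and an outlined copy would have
# to be reached by a real call rather than a tail call. With CFIs in
# the candidate but no tail-call outlining available, neither function
# is expected to be outlined; both keep their full bodies in the
# checks.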
+--- +name: funcF +tracksRegLiveness: true +body: | + bb.0: + liveins: $x10, $x11 + ; RV32I-MO-LABEL: name: funcF + ; RV32I-MO: liveins: $x10, $x11 + ; RV32I-MO-NEXT: {{ $}} + ; RV32I-MO-NEXT: $x10 = ORI $x10, 1023 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, 0 + ; RV32I-MO-NEXT: $x11 = ORI $x11, 1023 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -4 + ; RV32I-MO-NEXT: $x12 = ADDI $x10, 17 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -8 + ; RV32I-MO-NEXT: $x11 = AND $x12, $x11 + ; RV32I-MO-NEXT: CFI_INSTRUCTION offset $x1, -12 + ; RV32I-MO-NEXT: $x10 = SUB $x10, $x11 + ; RV32I-MO-NEXT: $x10 = ADDI $x10, 2 + ; RV32I-MO-NEXT: PseudoRET + ; + ; RV64I-MO-LABEL: name: funcF + ; RV64I-MO: liveins: $x10, $x11 + ; RV64I-MO-NEXT: {{ $}} + ; RV64I-MO-NEXT: $x10 = ORI $x10, 1023 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, 0 + ; RV64I-MO-NEXT: $x11 = ORI $x11, 1023 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -4 + ; RV64I-MO-NEXT: $x12 = ADDI $x10, 17 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -8 + ; RV64I-MO-NEXT: $x11 = AND $x12, $x11 + ; RV64I-MO-NEXT: CFI_INSTRUCTION offset $x1, -12 + ; RV64I-MO-NEXT: $x10 = SUB $x10, $x11 + ; RV64I-MO-NEXT: $x10 = ADDI $x10, 2 + ; RV64I-MO-NEXT: PseudoRET + $x10 = ORI $x10, 1023 CFI_INSTRUCTION offset $x1, 0 + $x11 = ORI $x11, 1023 + CFI_INSTRUCTION offset $x1, -4 + $x12 = ADDI $x10, 17 + CFI_INSTRUCTION offset $x1, -8 + $x11 = AND $x12, $x11 + CFI_INSTRUCTION offset $x1, -12 $x10 = SUB $x10, $x11 + $x10 = ADDI $x10, 2 PseudoRET - +... # OUTLINED-LABEL: name: OUTLINED_FUNCTION_0 # OUTLINED: liveins: $x11, $x10 # OUTLINED-NEXT: {{ $}} # OUTLINED-NEXT: $x10 = ORI $x10, 1023 +# OUTLINED-NEXT: CFI_INSTRUCTION offset $x1, 0 # OUTLINED-NEXT: $x11 = ORI $x11, 1023 +# OUTLINED-NEXT: CFI_INSTRUCTION offset $x1, -4 # OUTLINED-NEXT: $x12 = ADDI $x10, 17 +# OUTLINED-NEXT: CFI_INSTRUCTION offset $x1, -8 # OUTLINED-NEXT: $x11 = AND $x12, $x11 +# OUTLINED-NEXT: CFI_INSTRUCTION offset $x1, -12 # OUTLINED-NEXT: $x10 = SUB $x10, $x11 # OUTLINED-NEXT: PseudoRET diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll index 92ae85f..8490dd0 100644 --- a/llvm/test/CodeGen/RISCV/remat.ll +++ b/llvm/test/CodeGen/RISCV/remat.ll @@ -1,6 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -O1 -mtriple=riscv32 -verify-machineinstrs < %s \ -; RUN: | FileCheck %s -check-prefix=RV32I +; RUN: llc -O1 -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s @a = common global i32 0, align 4 @l = common global i32 0, align 4 @@ -21,113 +20,113 @@ ; situation. 
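As background for the checks that follow (which this patch regenerates for riscv64, with 8-byte sd/ld spills, in place of the old riscv32 RV32I prefixes), here is a reduced sketch of the register-pressure pattern the test exercises. The globals @x/@y and the callee @use are hypothetical stand-ins for the test's own a..l and foo:

@x = global i32 0
@y = global i32 0

declare void @use(i32, i32)

; The lui behind each %hi(global) base address is cheap and has no
; side effects, so when live ranges cross the call and registers run
; out, the backend can keep the base in a callee-saved register (as
; the checks below do with s0-s11) or rematerialize the lui at its
; uses instead of spilling and reloading it.
define void @pressure() nounwind {
  %vx = load i32, ptr @x, align 4
  %vy = load i32, ptr @y, align 4
  call void @use(i32 %vx, i32 %vy)
  store i32 %vy, ptr @x, align 4
  store i32 %vx, ptr @y, align 4
  ret void
}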
define i32 @test() nounwind { -; RV32I-LABEL: test: -; RV32I: # %bb.0: # %entry -; RV32I-NEXT: addi sp, sp, -64 -; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s0, 56(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s1, 52(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s2, 48(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s3, 44(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s4, 40(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s5, 36(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s6, 32(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s7, 28(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s8, 24(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s9, 20(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s10, 16(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s11, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui s0, %hi(a) -; RV32I-NEXT: lw a0, %lo(a)(s0) -; RV32I-NEXT: beqz a0, .LBB0_11 -; RV32I-NEXT: # %bb.1: # %for.body.preheader -; RV32I-NEXT: lui s1, %hi(l) -; RV32I-NEXT: lui s2, %hi(k) -; RV32I-NEXT: lui s3, %hi(j) -; RV32I-NEXT: lui s4, %hi(i) -; RV32I-NEXT: lui s5, %hi(d) -; RV32I-NEXT: lui s6, %hi(e) -; RV32I-NEXT: lui s7, %hi(f) -; RV32I-NEXT: lui s8, %hi(g) -; RV32I-NEXT: lui s9, %hi(h) -; RV32I-NEXT: lui s10, %hi(c) -; RV32I-NEXT: lui s11, %hi(b) -; RV32I-NEXT: j .LBB0_3 -; RV32I-NEXT: .LBB0_2: # %for.inc -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a0, %lo(a)(s0) -; RV32I-NEXT: addi a0, a0, -1 -; RV32I-NEXT: sw a0, %lo(a)(s0) -; RV32I-NEXT: beqz a0, .LBB0_11 -; RV32I-NEXT: .LBB0_3: # %for.body -; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: lw a1, %lo(l)(s1) -; RV32I-NEXT: beqz a1, .LBB0_5 -; RV32I-NEXT: # %bb.4: # %if.then -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a1, %lo(b)(s11) -; RV32I-NEXT: lw a2, %lo(c)(s10) -; RV32I-NEXT: lw a3, %lo(d)(s5) -; RV32I-NEXT: lw a4, %lo(e)(s6) -; RV32I-NEXT: li a5, 32 -; RV32I-NEXT: call foo -; RV32I-NEXT: .LBB0_5: # %if.end -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a0, %lo(k)(s2) -; RV32I-NEXT: beqz a0, .LBB0_7 -; RV32I-NEXT: # %bb.6: # %if.then3 -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a0, %lo(b)(s11) -; RV32I-NEXT: lw a1, %lo(c)(s10) -; RV32I-NEXT: lw a2, %lo(d)(s5) -; RV32I-NEXT: lw a3, %lo(e)(s6) -; RV32I-NEXT: lw a4, %lo(f)(s7) -; RV32I-NEXT: li a5, 64 -; RV32I-NEXT: call foo -; RV32I-NEXT: .LBB0_7: # %if.end5 -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a0, %lo(j)(s3) -; RV32I-NEXT: beqz a0, .LBB0_9 -; RV32I-NEXT: # %bb.8: # %if.then7 -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a0, %lo(c)(s10) -; RV32I-NEXT: lw a1, %lo(d)(s5) -; RV32I-NEXT: lw a2, %lo(e)(s6) -; RV32I-NEXT: lw a3, %lo(f)(s7) -; RV32I-NEXT: lw a4, %lo(g)(s8) -; RV32I-NEXT: li a5, 32 -; RV32I-NEXT: call foo -; RV32I-NEXT: .LBB0_9: # %if.end9 -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a0, %lo(i)(s4) -; RV32I-NEXT: beqz a0, .LBB0_2 -; RV32I-NEXT: # %bb.10: # %if.then11 -; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1 -; RV32I-NEXT: lw a0, %lo(d)(s5) -; RV32I-NEXT: lw a1, %lo(e)(s6) -; RV32I-NEXT: lw a2, %lo(f)(s7) -; RV32I-NEXT: lw a3, %lo(g)(s8) -; RV32I-NEXT: lw a4, %lo(h)(s9) -; RV32I-NEXT: li a5, 32 -; RV32I-NEXT: call foo -; RV32I-NEXT: j .LBB0_2 -; RV32I-NEXT: .LBB0_11: # %for.end -; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s1, 52(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s2, 48(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw 
s3, 44(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s4, 40(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s5, 36(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s6, 32(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s7, 28(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s8, 24(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s9, 20(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s10, 16(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s11, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 64 -; RV32I-NEXT: ret +; CHECK-LABEL: test: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -112 +; CHECK-NEXT: sd ra, 104(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s0, 96(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s1, 88(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s2, 80(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s3, 72(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s4, 64(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s5, 56(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s6, 48(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s7, 40(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s8, 32(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s9, 24(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s10, 16(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s11, 8(sp) # 8-byte Folded Spill +; CHECK-NEXT: lui s0, %hi(a) +; CHECK-NEXT: lw a0, %lo(a)(s0) +; CHECK-NEXT: beqz a0, .LBB0_11 +; CHECK-NEXT: # %bb.1: # %for.body.preheader +; CHECK-NEXT: lui s1, %hi(l) +; CHECK-NEXT: lui s2, %hi(k) +; CHECK-NEXT: lui s3, %hi(j) +; CHECK-NEXT: lui s4, %hi(i) +; CHECK-NEXT: lui s5, %hi(d) +; CHECK-NEXT: lui s6, %hi(e) +; CHECK-NEXT: lui s7, %hi(f) +; CHECK-NEXT: lui s8, %hi(g) +; CHECK-NEXT: lui s9, %hi(h) +; CHECK-NEXT: lui s10, %hi(c) +; CHECK-NEXT: lui s11, %hi(b) +; CHECK-NEXT: j .LBB0_3 +; CHECK-NEXT: .LBB0_2: # %for.inc +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a0, %lo(a)(s0) +; CHECK-NEXT: addiw a0, a0, -1 +; CHECK-NEXT: sw a0, %lo(a)(s0) +; CHECK-NEXT: beqz a0, .LBB0_11 +; CHECK-NEXT: .LBB0_3: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: lw a1, %lo(l)(s1) +; CHECK-NEXT: beqz a1, .LBB0_5 +; CHECK-NEXT: # %bb.4: # %if.then +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a4, %lo(e)(s6) +; CHECK-NEXT: lw a3, %lo(d)(s5) +; CHECK-NEXT: lw a2, %lo(c)(s10) +; CHECK-NEXT: lw a1, %lo(b)(s11) +; CHECK-NEXT: li a5, 32 +; CHECK-NEXT: call foo +; CHECK-NEXT: .LBB0_5: # %if.end +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a0, %lo(k)(s2) +; CHECK-NEXT: beqz a0, .LBB0_7 +; CHECK-NEXT: # %bb.6: # %if.then3 +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a4, %lo(f)(s7) +; CHECK-NEXT: lw a3, %lo(e)(s6) +; CHECK-NEXT: lw a2, %lo(d)(s5) +; CHECK-NEXT: lw a1, %lo(c)(s10) +; CHECK-NEXT: lw a0, %lo(b)(s11) +; CHECK-NEXT: li a5, 64 +; CHECK-NEXT: call foo +; CHECK-NEXT: .LBB0_7: # %if.end5 +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a0, %lo(j)(s3) +; CHECK-NEXT: beqz a0, .LBB0_9 +; CHECK-NEXT: # %bb.8: # %if.then7 +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a4, %lo(g)(s8) +; CHECK-NEXT: lw a3, %lo(f)(s7) +; CHECK-NEXT: lw a2, %lo(e)(s6) +; CHECK-NEXT: lw a1, %lo(d)(s5) +; CHECK-NEXT: lw a0, %lo(c)(s10) +; CHECK-NEXT: li a5, 32 +; CHECK-NEXT: call foo +; CHECK-NEXT: .LBB0_9: # %if.end9 +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a0, %lo(i)(s4) +; CHECK-NEXT: beqz a0, .LBB0_2 +; CHECK-NEXT: # %bb.10: # %if.then11 +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: lw a4, %lo(h)(s9) +; CHECK-NEXT: lw 
a3, %lo(g)(s8) +; CHECK-NEXT: lw a2, %lo(f)(s7) +; CHECK-NEXT: lw a1, %lo(e)(s6) +; CHECK-NEXT: lw a0, %lo(d)(s5) +; CHECK-NEXT: li a5, 32 +; CHECK-NEXT: call foo +; CHECK-NEXT: j .LBB0_2 +; CHECK-NEXT: .LBB0_11: # %for.end +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: ld ra, 104(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s0, 96(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s1, 88(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s2, 80(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s3, 72(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s4, 64(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s5, 56(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s6, 48(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s7, 40(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s8, 32(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s9, 24(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s10, 16(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s11, 8(sp) # 8-byte Folded Reload +; CHECK-NEXT: addi sp, sp, 112 +; CHECK-NEXT: ret entry: %.pr = load i32, ptr @a, align 4 %tobool14 = icmp eq i32 %.pr, 0 diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll index d4b2288..e56c7b4 100644 --- a/llvm/test/CodeGen/RISCV/rv64zba.ll +++ b/llvm/test/CodeGen/RISCV/rv64zba.ll @@ -1459,6 +1459,34 @@ define i64 @mul288(i64 %a) { ret i64 %c } +define i64 @zext_mul44(i32 signext %a) { +; RV64I-LABEL: zext_mul44: +; RV64I: # %bb.0: +; RV64I-NEXT: li a1, 11 +; RV64I-NEXT: slli a1, a1, 34 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: mulhu a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBA-LABEL: zext_mul44: +; RV64ZBA: # %bb.0: +; RV64ZBA-NEXT: slli.uw a0, a0, 2 +; RV64ZBA-NEXT: sh2add a1, a0, a0 +; RV64ZBA-NEXT: sh1add a0, a1, a0 +; RV64ZBA-NEXT: ret +; +; RV64XANDESPERF-LABEL: zext_mul44: +; RV64XANDESPERF: # %bb.0: +; RV64XANDESPERF-NEXT: slli a0, a0, 32 +; RV64XANDESPERF-NEXT: srli a0, a0, 30 +; RV64XANDESPERF-NEXT: nds.lea.w a1, a0, a0 +; RV64XANDESPERF-NEXT: nds.lea.h a0, a0, a1 +; RV64XANDESPERF-NEXT: ret + %b = zext i32 %a to i64 + %c = mul i64 %b, 44 + ret i64 %c +} + define i64 @zext_mul68(i32 signext %a) { ; RV64I-LABEL: zext_mul68: ; RV64I: # %bb.0: @@ -1511,6 +1539,34 @@ define i64 @zext_mul96(i32 signext %a) { ret i64 %c } +define i64 @zext_mul100(i32 signext %a) { +; RV64I-LABEL: zext_mul100: +; RV64I: # %bb.0: +; RV64I-NEXT: li a1, 25 +; RV64I-NEXT: slli a1, a1, 34 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: mulhu a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBA-LABEL: zext_mul100: +; RV64ZBA: # %bb.0: +; RV64ZBA-NEXT: slli.uw a0, a0, 2 +; RV64ZBA-NEXT: sh2add a0, a0, a0 +; RV64ZBA-NEXT: sh2add a0, a0, a0 +; RV64ZBA-NEXT: ret +; +; RV64XANDESPERF-LABEL: zext_mul100: +; RV64XANDESPERF: # %bb.0: +; RV64XANDESPERF-NEXT: slli a0, a0, 32 +; RV64XANDESPERF-NEXT: srli a0, a0, 30 +; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0 +; RV64XANDESPERF-NEXT: nds.lea.w a0, a0, a0 +; RV64XANDESPERF-NEXT: ret + %b = zext i32 %a to i64 + %c = mul i64 %b, 100 + ret i64 %c +} + define i64 @zext_mul160(i32 signext %a) { ; RV64I-LABEL: zext_mul160: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll index bf6802d..93b68b0 100644 --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -1834,13 +1834,12 @@ define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, ptr %res) { ; RV32ZICOND-NEXT: mul a5, a3, a0 ; RV32ZICOND-NEXT: mul a6, a1, a2 ; RV32ZICOND-NEXT: mulhu a7, a0, a2 -; RV32ZICOND-NEXT: snez t0, a3 +; RV32ZICOND-NEXT: add a5, a6, a5 +; RV32ZICOND-NEXT: snez a6, a3 ; RV32ZICOND-NEXT: mulhu a3, a3, 
a0 -; RV32ZICOND-NEXT: mul t1, a0, a2 +; RV32ZICOND-NEXT: mul t0, a0, a2 ; RV32ZICOND-NEXT: mulhu a0, a1, a2 -; RV32ZICOND-NEXT: snez a1, a1 -; RV32ZICOND-NEXT: add a5, a6, a5 -; RV32ZICOND-NEXT: and a1, a1, t0 +; RV32ZICOND-NEXT: czero.eqz a1, a6, a1 ; RV32ZICOND-NEXT: snez a0, a0 ; RV32ZICOND-NEXT: snez a2, a3 ; RV32ZICOND-NEXT: add a5, a7, a5 @@ -1848,7 +1847,7 @@ define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, ptr %res) { ; RV32ZICOND-NEXT: sltu a1, a5, a7 ; RV32ZICOND-NEXT: or a0, a0, a2 ; RV32ZICOND-NEXT: or a0, a0, a1 -; RV32ZICOND-NEXT: sw t1, 0(a4) +; RV32ZICOND-NEXT: sw t0, 0(a4) ; RV32ZICOND-NEXT: sw a5, 4(a4) ; RV32ZICOND-NEXT: ret ; @@ -3690,11 +3689,10 @@ define i64 @umulo.select.i64(i64 %v1, i64 %v2) { ; RV32ZICOND-NEXT: mul a5, a1, a2 ; RV32ZICOND-NEXT: snez a6, a3 ; RV32ZICOND-NEXT: add a4, a5, a4 -; RV32ZICOND-NEXT: snez a5, a1 -; RV32ZICOND-NEXT: and a5, a5, a6 -; RV32ZICOND-NEXT: mulhu a6, a1, a2 -; RV32ZICOND-NEXT: snez a6, a6 -; RV32ZICOND-NEXT: or a5, a5, a6 +; RV32ZICOND-NEXT: mulhu a5, a1, a2 +; RV32ZICOND-NEXT: czero.eqz a6, a6, a1 +; RV32ZICOND-NEXT: snez a5, a5 +; RV32ZICOND-NEXT: or a5, a6, a5 ; RV32ZICOND-NEXT: mulhu a6, a0, a2 ; RV32ZICOND-NEXT: add a4, a6, a4 ; RV32ZICOND-NEXT: sltu a4, a4, a6 @@ -3783,18 +3781,17 @@ define i1 @umulo.not.i64(i64 %v1, i64 %v2) { ; RV32ZICOND: # %bb.0: # %entry ; RV32ZICOND-NEXT: mul a4, a3, a0 ; RV32ZICOND-NEXT: mul a5, a1, a2 -; RV32ZICOND-NEXT: mulhu a6, a0, a2 +; RV32ZICOND-NEXT: add a4, a5, a4 +; RV32ZICOND-NEXT: mulhu a5, a0, a2 ; RV32ZICOND-NEXT: mulhu a0, a3, a0 ; RV32ZICOND-NEXT: snez a3, a3 ; RV32ZICOND-NEXT: mulhu a2, a1, a2 -; RV32ZICOND-NEXT: snez a1, a1 -; RV32ZICOND-NEXT: add a4, a5, a4 -; RV32ZICOND-NEXT: and a1, a1, a3 +; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: snez a2, a2 ; RV32ZICOND-NEXT: snez a0, a0 -; RV32ZICOND-NEXT: add a4, a6, a4 +; RV32ZICOND-NEXT: add a4, a5, a4 ; RV32ZICOND-NEXT: or a1, a1, a2 -; RV32ZICOND-NEXT: sltu a2, a4, a6 +; RV32ZICOND-NEXT: sltu a2, a4, a5 ; RV32ZICOND-NEXT: or a0, a1, a0 ; RV32ZICOND-NEXT: or a0, a0, a2 ; RV32ZICOND-NEXT: xori a0, a0, 1 @@ -5156,18 +5153,17 @@ define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) { ; RV32ZICOND: # %bb.0: # %entry ; RV32ZICOND-NEXT: mul a4, a3, a0 ; RV32ZICOND-NEXT: mul a5, a1, a2 -; RV32ZICOND-NEXT: mulhu a6, a0, a2 +; RV32ZICOND-NEXT: add a4, a5, a4 +; RV32ZICOND-NEXT: mulhu a5, a0, a2 ; RV32ZICOND-NEXT: mulhu a0, a3, a0 ; RV32ZICOND-NEXT: snez a3, a3 ; RV32ZICOND-NEXT: mulhu a2, a1, a2 -; RV32ZICOND-NEXT: snez a1, a1 -; RV32ZICOND-NEXT: add a4, a5, a4 -; RV32ZICOND-NEXT: and a1, a1, a3 +; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: snez a2, a2 ; RV32ZICOND-NEXT: snez a0, a0 -; RV32ZICOND-NEXT: add a4, a6, a4 +; RV32ZICOND-NEXT: add a4, a5, a4 ; RV32ZICOND-NEXT: or a1, a1, a2 -; RV32ZICOND-NEXT: sltu a2, a4, a6 +; RV32ZICOND-NEXT: sltu a2, a4, a5 ; RV32ZICOND-NEXT: or a0, a1, a0 ; RV32ZICOND-NEXT: or a0, a0, a2 ; RV32ZICOND-NEXT: beqz a0, .LBB64_2 diff --git a/llvm/test/CodeGen/RISCV/zicond-opts.ll b/llvm/test/CodeGen/RISCV/zicond-opts.ll index 305ab93..c6d7298 100644 --- a/llvm/test/CodeGen/RISCV/zicond-opts.ll +++ b/llvm/test/CodeGen/RISCV/zicond-opts.ll @@ -7,22 +7,132 @@ define i32 @icmp_and(i64 %x, i64 %y) { ; RV32ZICOND-LABEL: icmp_and: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a2, a2, a3 +; RV32ZICOND-NEXT: snez a2, a2 +; RV32ZICOND-NEXT: or a0, a0, a1 +; RV32ZICOND-NEXT: czero.eqz a0, a2, a0 +; RV32ZICOND-NEXT: ret +; +; RV64ZICOND-LABEL: icmp_and: +; RV64ZICOND: # %bb.0: +; RV64ZICOND-NEXT: snez a1, a1 
+; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 +; RV64ZICOND-NEXT: ret + %3 = icmp ne i64 %y, 0 + %4 = icmp ne i64 %x, 0 + %5 = and i1 %4, %3 + %6 = zext i1 %5 to i32 + ret i32 %6 +} + +; Make sure we choose to replace the single-use icmp +define i32 @icmp_and_x_multiple_uses(i64 %x, i64 %y) { +; RV32ZICOND-LABEL: icmp_and_x_multiple_uses: +; RV32ZICOND: # %bb.0: +; RV32ZICOND-NEXT: or a2, a2, a3 +; RV32ZICOND-NEXT: or a0, a0, a1 +; RV32ZICOND-NEXT: snez a0, a0 +; RV32ZICOND-NEXT: czero.eqz a1, a0, a2 +; RV32ZICOND-NEXT: add a0, a1, a0 +; RV32ZICOND-NEXT: ret +; +; RV64ZICOND-LABEL: icmp_and_x_multiple_uses: +; RV64ZICOND: # %bb.0: +; RV64ZICOND-NEXT: snez a0, a0 +; RV64ZICOND-NEXT: czero.eqz a1, a0, a1 +; RV64ZICOND-NEXT: add a0, a1, a0 +; RV64ZICOND-NEXT: ret + %3 = icmp ne i64 %y, 0 + %4 = icmp ne i64 %x, 0 + %5 = and i1 %4, %3 + %6 = zext i1 %5 to i32 + %7 = zext i1 %4 to i32 + %8 = add i32 %6, %7 + ret i32 %8 +} + +; Make sure we choose to replace the single-use icmp +define i32 @icmp_and_y_multiple_uses(i64 %x, i64 %y) { +; RV32ZICOND-LABEL: icmp_and_y_multiple_uses: +; RV32ZICOND: # %bb.0: +; RV32ZICOND-NEXT: or a2, a2, a3 +; RV32ZICOND-NEXT: snez a2, a2 +; RV32ZICOND-NEXT: or a0, a0, a1 +; RV32ZICOND-NEXT: czero.eqz a0, a2, a0 +; RV32ZICOND-NEXT: add a0, a0, a2 +; RV32ZICOND-NEXT: ret +; +; RV64ZICOND-LABEL: icmp_and_y_multiple_uses: +; RV64ZICOND: # %bb.0: +; RV64ZICOND-NEXT: snez a1, a1 +; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 +; RV64ZICOND-NEXT: add a0, a0, a1 +; RV64ZICOND-NEXT: ret + %3 = icmp ne i64 %y, 0 + %4 = icmp ne i64 %x, 0 + %5 = and i1 %4, %3 + %6 = zext i1 %5 to i32 + %7 = zext i1 %3 to i32 + %8 = add i32 %6, %7 + ret i32 %8 +} + +; Both icmps have multiple uses; don't optimize +define i32 @icmp_and_xy_multiple_uses(i64 %x, i64 %y) { +; RV32ZICOND-LABEL: icmp_and_xy_multiple_uses: +; RV32ZICOND: # %bb.0: +; RV32ZICOND-NEXT: or a2, a2, a3 ; RV32ZICOND-NEXT: or a0, a0, a1 ; RV32ZICOND-NEXT: snez a1, a2 ; RV32ZICOND-NEXT: snez a0, a0 -; RV32ZICOND-NEXT: and a0, a0, a1 +; RV32ZICOND-NEXT: and a2, a0, a1 +; RV32ZICOND-NEXT: add a0, a1, a0 +; RV32ZICOND-NEXT: add a0, a2, a0 ; RV32ZICOND-NEXT: ret ; -; RV64ZICOND-LABEL: icmp_and: +; RV64ZICOND-LABEL: icmp_and_xy_multiple_uses: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: snez a1, a1 ; RV64ZICOND-NEXT: snez a0, a0 -; RV64ZICOND-NEXT: and a0, a0, a1 +; RV64ZICOND-NEXT: and a2, a0, a1 +; RV64ZICOND-NEXT: add a0, a1, a0 +; RV64ZICOND-NEXT: add a0, a2, a0 ; RV64ZICOND-NEXT: ret %3 = icmp ne i64 %y, 0 %4 = icmp ne i64 %x, 0 %5 = and i1 %4, %3 %6 = zext i1 %5 to i32 + %7 = zext i1 %3 to i32 + %8 = zext i1 %4 to i32 + %9 = add i32 %6, %7 + %10 = add i32 %9, %8 + ret i32 %10 +} + + +; (and (icmp x, 
0, ne), (icmp y, 0, ne)) -> (czero.eqz (icmp x, 0, ne), y) +define i32 @icmp_and_select(i64 %x, i64 %y, i32 %z) { +; RV32ZICOND-LABEL: icmp_and_select: +; RV32ZICOND: # %bb.0: +; RV32ZICOND-NEXT: sgtz a5, a3 +; RV32ZICOND-NEXT: snez a2, a2 +; RV32ZICOND-NEXT: czero.eqz a5, a5, a3 +; RV32ZICOND-NEXT: czero.nez a2, a2, a3 +; RV32ZICOND-NEXT: or a2, a2, a5 +; RV32ZICOND-NEXT: or a0, a0, a1 +; RV32ZICOND-NEXT: czero.eqz a0, a2, a0 +; RV32ZICOND-NEXT: czero.eqz a0, a4, a0 +; RV32ZICOND-NEXT: ret +; +; RV64ZICOND-LABEL: icmp_and_select: +; RV64ZICOND: # %bb.0: +; RV64ZICOND-NEXT: sgtz a1, a1 +; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 +; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 +; RV64ZICOND-NEXT: ret + %3 = icmp sgt i64 %y, 0 + %4 = icmp ne i64 %x, 0 + %5 = and i1 %4, %3 + %6 = select i1 %5, i32 %z, i32 0 ret i32 %6 } @@ -32,21 +142,17 @@ define i32 @icmp_and_and(i64 %x, i64 %y, i64 %z) { ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a2, a2, a3 ; RV32ZICOND-NEXT: or a0, a0, a1 -; RV32ZICOND-NEXT: or a4, a4, a5 -; RV32ZICOND-NEXT: snez a1, a2 ; RV32ZICOND-NEXT: snez a0, a0 -; RV32ZICOND-NEXT: and a0, a1, a0 -; RV32ZICOND-NEXT: snez a1, a4 -; RV32ZICOND-NEXT: and a0, a1, a0 +; RV32ZICOND-NEXT: czero.eqz a0, a0, a2 +; RV32ZICOND-NEXT: or a4, a4, a5 +; RV32ZICOND-NEXT: czero.eqz a0, a0, a4 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: icmp_and_and: ; RV64ZICOND: # %bb.0: -; RV64ZICOND-NEXT: snez a1, a1 ; RV64ZICOND-NEXT: snez a0, a0 -; RV64ZICOND-NEXT: and a0, a1, a0 -; RV64ZICOND-NEXT: snez a1, a2 -; RV64ZICOND-NEXT: and a0, a1, a0 +; RV64ZICOND-NEXT: czero.eqz a0, a0, a1 +; RV64ZICOND-NEXT: czero.eqz a0, a0, a2 ; RV64ZICOND-NEXT: ret %4 = icmp ne i64 %y, 0 %5 = icmp ne i64 %x, 0 diff --git a/llvm/test/CodeGen/SPIRV/ComparePointers.ll b/llvm/test/CodeGen/SPIRV/ComparePointers.ll index 408b955..bc1514e 100644 --- a/llvm/test/CodeGen/SPIRV/ComparePointers.ll +++ b/llvm/test/CodeGen/SPIRV/ComparePointers.ll @@ -12,7 +12,7 @@ ;; return; ;; } -; CHECK-SPIRV: OpConvertPtrToU +; CHECK-SPIRV: OpSpecConstantOp %[[#]] ConvertPtrToU ; CHECK-SPIRV: OpConvertPtrToU ; CHECK-SPIRV: OpINotEqual ; CHECK-SPIRV: OpConvertPtrToU diff --git a/llvm/test/CodeGen/SPIRV/complex-constexpr.ll b/llvm/test/CodeGen/SPIRV/complex-constexpr.ll index e2c1d00..a97a124 100644 --- a/llvm/test/CodeGen/SPIRV/complex-constexpr.ll +++ b/llvm/test/CodeGen/SPIRV/complex-constexpr.ll @@ -6,7 +6,7 @@ define linkonce_odr hidden spir_func void @test() { entry: ; CHECK: %[[#MinusOne:]] = OpConstant %[[#]] 18446744073709551615 -; CHECK: %[[#Ptr:]] = OpConvertUToPtr %[[#]] %[[#MinusOne]] +; CHECK: %[[#Ptr:]] = OpSpecConstantOp %[[#]] ConvertUToPtr %[[#MinusOne]] ; CHECK: %[[#PtrCast:]] = OpPtrCastToGeneric %[[#]] %[[#]] ; CHECK: %[[#]] = OpFunctionCall %[[#]] %[[#]] %[[#PtrCast]] %[[#Ptr]] diff --git a/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtrInGlobalInit.ll b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtrInGlobalInit.ll new file mode 100644 index 0000000..f397030 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtrInGlobalInit.ll @@ -0,0 +1,49 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: %[[Int8Ty:[0-9]+]] = OpTypeInt 8 0 +; CHECK: %[[Int8PtrTy:[0-9]+]] = OpTypePointer Generic %[[Int8Ty]] +; CHECK-DAG: %[[GlobInt8PtrTy:[0-9]+]] = OpTypePointer CrossWorkgroup %[[Int8Ty]] +; CHECK: %[[GlobInt8PtrPtrTy:[0-9]+]] = OpTypePointer CrossWorkgroup %[[GlobInt8PtrTy]] +; CHECK: 
%[[Int8PtrGlobPtrPtrTy:[0-9]+]] = OpTypePointer Generic %[[GlobInt8PtrPtrTy]] +; CHECK: %[[Int32Ty:[0-9]+]] = OpTypeInt 32 0 +; CHECK: %[[Const5:[0-9]+]] = OpConstant %[[Int32Ty]] 5 +; CHECK: %[[ArrTy:[0-9]+]] = OpTypeArray %[[GlobInt8PtrTy]] %[[Const5]] +; CHECK: %[[VtblTy:[0-9]+]] = OpTypeStruct %[[ArrTy]] %[[ArrTy]] %[[ArrTy]] %[[ArrTy]] %[[ArrTy]] +; CHECK: %[[Int64Ty:[0-9]+]] = OpTypeInt 64 0 +; CHECK: %[[GlobVtblPtrTy:[0-9]+]] = OpTypePointer CrossWorkgroup %[[VtblTy]] +; CHECK: %[[ConstMinus184:[0-9]+]] = OpConstant %[[Int64Ty]] 18446744073709551432 +; CHECK: %[[ConstMinus16:[0-9]+]] = OpConstant %[[Int64Ty]] 18446744073709551600 +; CHECK: %[[Const168:[0-9]+]] = OpConstant %[[Int64Ty]] 168 +; CHECK: %[[Nullptr:[0-9]+]] = OpConstantNull %[[GlobInt8PtrTy]] +; CHECK: %[[Const184:[0-9]+]] = OpConstant %[[Int64Ty]] 184 +; CHECK: %[[Const184toPtr:[0-9]+]] = OpSpecConstantOp %[[GlobInt8PtrTy]] ConvertUToPtr %[[Const184]] +; CHECK: %[[Const168toPtr:[0-9]+]] = OpSpecConstantOp %[[GlobInt8PtrTy]] ConvertUToPtr %[[Const168]] +; CHECK: %[[ConstMinus16toPtr:[0-9]+]] = OpSpecConstantOp %[[GlobInt8PtrTy]] ConvertUToPtr %[[ConstMinus16]] +; CHECK: %[[ConstMinus184toPtr:[0-9]+]] = OpSpecConstantOp %[[GlobInt8PtrTy]] ConvertUToPtr %[[ConstMinus184]] +; CHECK: %[[Vtbl012:[0-9]+]] = OpConstantComposite %[[ArrTy]] %[[Const184toPtr]] %[[Nullptr]] %[[Nullptr]] %[[Nullptr]] %[[Nullptr]] +; CHECK: %[[Vtbl3:[0-9]+]] = OpConstantComposite %[[ArrTy]] %[[Const168toPtr]] %[[ConstMinus16toPtr]] %[[Nullptr]] %[[Nullptr]] %[[Nullptr]] +; CHECK: %[[Vtbl4:[0-9]+]] = OpConstantComposite %[[ArrTy]] %[[ConstMinus184toPtr]] %[[ConstMinus184toPtr]] %[[Nullptr]] %[[Nullptr]] %[[Nullptr]] +; CHECK: %[[Vtbl:[0-9]+]] = OpConstantComposite %[[VtblTy]] %[[Vtbl012]] %[[Vtbl012]] %[[Vtbl012]] %[[Vtbl3]] %[[Vtbl4]] +; CHECK: %[[#]] = OpVariable %[[GlobVtblPtrTy]] CrossWorkgroup %[[Vtbl]] + +@vtable = linkonce_odr unnamed_addr addrspace(1) constant { [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)] } + { [5 x ptr addrspace(1)] [ptr addrspace(1) inttoptr (i64 184 to ptr addrspace(1)), ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null], + [5 x ptr addrspace(1)] [ptr addrspace(1) inttoptr (i64 184 to ptr addrspace(1)), ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null], + [5 x ptr addrspace(1)] [ptr addrspace(1) inttoptr (i64 184 to ptr addrspace(1)), ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null], + [5 x ptr addrspace(1)] [ptr addrspace(1) inttoptr (i64 168 to ptr addrspace(1)), ptr addrspace(1) inttoptr (i64 -16 to ptr addrspace(1)), ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null], + [5 x ptr addrspace(1)] [ptr addrspace(1) inttoptr (i64 -184 to ptr addrspace(1)), ptr addrspace(1) inttoptr (i64 -184 to ptr addrspace(1)), ptr addrspace(1) null, ptr addrspace(1) null, ptr addrspace(1) null] } + +define linkonce_odr spir_func void @foo(ptr addrspace(4) %this) { +entry: + %0 = getelementptr inbounds i8, ptr addrspace(4) %this, i64 184 + store ptr addrspace(1) getelementptr inbounds inrange(-24, 16) ({ [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)] }, ptr addrspace(1) @vtable, i32 0, i32 0, i32 3), ptr addrspace(4) %this + store ptr addrspace(1) getelementptr inbounds inrange(-24, 16) ({ [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr 
addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)] }, ptr addrspace(1) @vtable, i32 0, i32 1, i32 3), ptr addrspace(4) %this + store ptr addrspace(1) getelementptr inbounds inrange(-24, 16) ({ [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)] }, ptr addrspace(1) @vtable, i32 0, i32 2, i32 3), ptr addrspace(4) %this + %add.ptr = getelementptr inbounds i8, ptr addrspace(4) %this, i64 184 + store ptr addrspace(1) getelementptr inbounds inrange(-24, 16) ({ [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)] }, ptr addrspace(1) @vtable, i32 0, i32 4, i32 3), ptr addrspace(4) %add.ptr + %add.ptr2 = getelementptr inbounds i8, ptr addrspace(4) %this, i64 16 + store ptr addrspace(1) getelementptr inbounds inrange(-24, 16) ({ [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)], [5 x ptr addrspace(1)] }, ptr addrspace(1) @vtable, i32 0, i32 3, i32 3), ptr addrspace(4) %add.ptr2 + + ret void +} diff --git a/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll b/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll index 79849a7..d9b4635 100644 --- a/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll +++ b/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll @@ -94,8 +94,8 @@ define <32 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_512(<32 x bfloat> %src, ; ; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_512: ; X86: # %bb.0: -; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08] +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] ; X86-NEXT: vsubbf16 %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xc9,0x5c,0xc2] ; X86-NEXT: vsubbf16 (%eax), %zmm1, %zmm1 # encoding: [0x62,0xf5,0x75,0x48,0x5c,0x08] ; X86-NEXT: vsubbf16 %zmm1, %zmm0, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x5c,0xc1] diff --git a/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll b/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll index 0f2c75b..01b7618 100644 --- a/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll +++ b/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll @@ -147,8 +147,8 @@ define <16 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_256(<16 x bfloat> %src, ; ; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_256: ; X86: # %bb.0: -; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08] +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] ; X86-NEXT: vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5c,0xc2] ; X86-NEXT: vsubbf16 (%eax), %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x5c,0x08] ; X86-NEXT: vsubbf16 %ymm1, %ymm0, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x5c,0xc1] @@ -201,8 +201,8 @@ define <8 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_128(<8 x bfloat> %src, <8 ; ; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_128: ; X86: # %bb.0: -; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08] +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] ; X86-NEXT: vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5c,0xc2] ; X86-NEXT: vsubbf16 (%eax), %xmm1, %xmm1 # encoding: [0x62,0xf5,0x75,0x08,0x5c,0x08] ; X86-NEXT: 
vsubbf16 %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x5c,0xc1] diff --git a/llvm/test/CodeGen/X86/narrow-add-i64.ll b/llvm/test/CodeGen/X86/narrow-add-i64.ll new file mode 100644 index 0000000..a7a54fd --- /dev/null +++ b/llvm/test/CodeGen/X86/narrow-add-i64.ll @@ -0,0 +1,94 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 + +define i64 @test_add_i64_i16_const(i16 %a) nounwind { +; X86-LABEL: test_add_i64_i16_const: +; X86: # %bb.0: +; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; X86-NEXT: addl $42, %eax +; X86-NEXT: xorl %edx, %edx +; X86-NEXT: retl +; +; X64-LABEL: test_add_i64_i16_const: +; X64: # %bb.0: +; X64-NEXT: movzwl %di, %eax +; X64-NEXT: addq $42, %rax +; X64-NEXT: retq + %zext_a = zext i16 %a to i64 + %sum = add nuw nsw i64 %zext_a, 42 + ret i64 %sum +} + +; TODO: The first 48 bits are all zeros, so we can safely truncate to 32-bit addition +define i64 @test_add_i64_i16_zext(i16 %a, i16 %b) nounwind { +; X86-LABEL: test_add_i64_i16_zext: +; X86: # %bb.0: +; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; X86-NEXT: addl %ecx, %eax +; X86-NEXT: xorl %edx, %edx +; X86-NEXT: retl +; +; X64-LABEL: test_add_i64_i16_zext: +; X64: # %bb.0: +; X64-NEXT: movzwl %di, %ecx +; X64-NEXT: movzwl %si, %eax +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: retq + %zext_a = zext i16 %a to i64 + %zext_b = zext i16 %b to i64 + %sum = add nuw nsw i64 %zext_a, %zext_b + ret i64 %sum +} + +; Negative: Set the 32nd bit of a to force 64-bit addition; we do not truncate to 32-bit addition in this case +define i64 @negative_test_add_i64_i16(i16 %a) nounwind { +; X86-LABEL: negative_test_add_i64_i16: +; X86: # %bb.0: +; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; X86-NEXT: addl $42, %eax +; X86-NEXT: movl $1, %edx +; X86-NEXT: retl +; +; X64-LABEL: negative_test_add_i64_i16: +; X64: # %bb.0: +; X64-NEXT: movzwl %di, %ecx +; X64-NEXT: movabsq $4294967338, %rax # imm = 0x10000002A +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: retq + %zext_a = zext i16 %a to i64 + %or_a = or i64 %zext_a, 4294967296 + %sum = add nuw nsw i64 %or_a, 42 + ret i64 %sum +} + +; Negative: We don't truncate to 32-bit addition in the case of sign extension +define i64 @negative_test_add_i64_i16_sext(i16 %a, i16 %b) nounwind { +; X86-LABEL: negative_test_add_i64_i16_sext: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl %ecx, %esi +; X86-NEXT: sarl $31, %esi +; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl %eax, %edx +; X86-NEXT: sarl $31, %edx +; X86-NEXT: addl %ecx, %eax +; X86-NEXT: adcl %esi, %edx +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: negative_test_add_i64_i16_sext: +; X64: # %bb.0: +; X64-NEXT: # kill: def $esi killed $esi def $rsi +; X64-NEXT: # kill: def $edi killed $edi def $rdi +; X64-NEXT: movswq %di, %rcx +; X64-NEXT: movswq %si, %rax +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: retq + %sext_a = sext i16 %a to i64 + %sext_b = sext i16 %b to i64 + %sum = add nuw nsw i64 %sext_a, %sext_b + ret i64 %sum +} diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll index 84c2cc6..7735500 100644 --- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll +++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll @@ -168,8 +168,8 @@ define void 
@load_2byte_chunk_of_4byte_alloca(ptr %src, i64 %byteOff, ptr %dst) define void @load_1byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind { ; X64-NO-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca: ; X64-NO-BMI2: # %bb.0: -; X64-NO-BMI2-NEXT: movq (%rdi), %rax ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx +; X64-NO-BMI2-NEXT: movq (%rdi), %rax ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NO-BMI2-NEXT: shrq %cl, %rax ; X64-NO-BMI2-NEXT: movb %al, (%rdx) @@ -188,17 +188,15 @@ define void @load_1byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax -; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi -; X86-NO-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %ebx +; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %esi +; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %ebx ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi -; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %edi +; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi ; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx @@ -215,13 +213,11 @@ define void @load_1byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-NO-BMI2-HAVE-SHLD: # %bb.0: ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx -; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi -; X86-NO-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edx ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi ; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl @@ -236,14 +232,11 @@ define void @load_1byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx -; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %edx -; X86-HAVE-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi -; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx +; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %esi +; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%edx), %edx ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi @@ -260,23 +253,19 @@ define void 
@load_1byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; ; X86-HAVE-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_8byte_alloca: ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0: -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edx +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %edx, %edx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %ebx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movb %bl, (%eax) +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %esi, %edx +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movb %dl, (%eax) ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl %init = load <8 x i8>, ptr %src, align 1 %byteOff.numbits = shl nuw nsw i64 %byteOff, 3 @@ -292,8 +281,8 @@ define void @load_1byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) define void @load_2byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind { ; X64-NO-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca: ; X64-NO-BMI2: # %bb.0: -; X64-NO-BMI2-NEXT: movq (%rdi), %rax ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx +; X64-NO-BMI2-NEXT: movq (%rdi), %rax ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NO-BMI2-NEXT: shrq %cl, %rax ; X64-NO-BMI2-NEXT: movw %ax, (%rdx) @@ -312,17 +301,15 @@ define void @load_2byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax -; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %edi -; X86-NO-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi +; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %edi +; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi -; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx +; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx ; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx @@ -339,18 +326,16 @@ define void @load_2byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-NO-BMI2-HAVE-SHLD: # %bb.0: ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; 
X86-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx -; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx -; X86-NO-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi -; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx -; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edx +; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi +; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl -; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi -; X86-NO-BMI2-HAVE-SHLD-NEXT: movw %si, (%eax) +; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %esi, %edx +; X86-NO-BMI2-HAVE-SHLD-NEXT: movw %dx, (%eax) ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl ; @@ -360,14 +345,11 @@ define void @load_2byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx -; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %edx -; X86-HAVE-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi -; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx +; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %esi +; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%edx), %edx ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi @@ -386,18 +368,16 @@ define void @load_2byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0: ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edx +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %edx, %edx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movw %si, (%eax) +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %esi, %edx +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movw %dx, (%eax) ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl %init = load <8 x i8>, ptr %src, align 1 @@ -413,8 +393,8 @@ define void @load_2byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) define void @load_4byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind { ; X64-NO-BMI2-LABEL: 
load_4byte_chunk_of_8byte_alloca: ; X64-NO-BMI2: # %bb.0: -; X64-NO-BMI2-NEXT: movq (%rdi), %rax ; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx +; X64-NO-BMI2-NEXT: movq (%rdi), %rax ; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NO-BMI2-NEXT: shrq %cl, %rax ; X64-NO-BMI2-NEXT: movl %eax, (%rdx) @@ -433,17 +413,15 @@ define void @load_4byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi ; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax -; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %edi -; X86-NO-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-NO-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi +; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %edi +; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx ; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi -; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl ; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx +; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl ; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx ; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx ; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx @@ -460,18 +438,16 @@ define void @load_4byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-NO-BMI2-HAVE-SHLD: # %bb.0: ; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NO-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx -; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx -; X86-NO-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-NO-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi -; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx -; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edx +; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi +; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx ; X86-NO-BMI2-HAVE-SHLD-NEXT: testb $32, %cl -; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi -; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%eax) +; X86-NO-BMI2-HAVE-SHLD-NEXT: cmovel %esi, %edx +; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%eax) ; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi ; X86-NO-BMI2-HAVE-SHLD-NEXT: retl ; @@ -481,14 +457,11 @@ define void @load_4byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi ; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx -; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %edx -; X86-HAVE-BMI2-NO-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-HAVE-BMI2-NO-SHLD-NEXT: movd %xmm0, %esi -; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx +; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %esi +; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 
(%edx), %edx ; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx ; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl ; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi @@ -507,18 +480,16 @@ define void @load_4byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0: ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movd %xmm0, %esi -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edx +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %edx, %edx ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: testb $32, %cl -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %edx, %esi -; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%eax) +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: cmovel %esi, %edx +; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%eax) ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi ; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl %init = load <8 x i8>, ptr %src, align 1 @@ -536,8 +507,8 @@ define void @load_4byte_chunk_of_8byte_alloca(ptr %src, i64 %byteOff, ptr %dst) define void @load_1byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind { ; X64-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-NO-SHLD: # %bb.0: -; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi @@ -557,8 +528,8 @@ define void @load_1byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-HAVE-SHLD: # %bb.0: ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx +; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi @@ -571,8 +542,8 @@ define void @load_1byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-NO-SHLD: # %bb.0: -; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rax ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx @@ -591,8 +562,8 @@ define void @load_1byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0: ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, 
%ecx +; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi @@ -698,8 +669,8 @@ define void @load_1byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) define void @load_2byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind { ; X64-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-NO-SHLD: # %bb.0: -; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi @@ -719,8 +690,8 @@ define void @load_2byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-HAVE-SHLD: # %bb.0: ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx +; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi @@ -733,8 +704,8 @@ define void @load_2byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-NO-SHLD: # %bb.0: -; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rax ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx @@ -753,8 +724,8 @@ define void @load_2byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0: ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx +; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi @@ -859,8 +830,8 @@ define void @load_2byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) define void @load_4byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind { ; X64-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-NO-SHLD: # %bb.0: -; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi @@ -880,8 +851,8 @@ define void @load_4byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-HAVE-SHLD: # %bb.0: ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx +; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; 
X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi @@ -894,8 +865,8 @@ define void @load_4byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-NO-SHLD: # %bb.0: -; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rax ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx @@ -914,8 +885,8 @@ define void @load_4byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0: ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx +; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi @@ -1020,8 +991,8 @@ define void @load_4byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind { ; X64-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-NO-SHLD: # %bb.0: -; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-NO-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax ; X64-NO-BMI2-NO-SHLD-NEXT: movq %xmm0, %rdi @@ -1041,8 +1012,8 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-NO-BMI2-HAVE-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca: ; X64-NO-BMI2-HAVE-SHLD: # %bb.0: ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx +; X64-NO-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-NO-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi @@ -1055,8 +1026,8 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; ; X64-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-NO-SHLD: # %bb.0: -; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi +; X64-HAVE-BMI2-NO-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-NO-SHLD-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm1, %rax ; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %xmm0, %rcx @@ -1075,8 +1046,8 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst) ; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca: ; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0: ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, %rcx -; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shll $3, %ecx +; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movdqu (%rdi), %xmm0 ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rax ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %xmm0, %rsi diff --git a/llvm/test/Transforms/InstCombine/known-bits-lerp-pattern.ll b/llvm/test/Transforms/InstCombine/known-bits-lerp-pattern.ll new file mode 100644 index 0000000..5a33d35 --- 
/dev/null +++ b/llvm/test/Transforms/InstCombine/known-bits-lerp-pattern.ll @@ -0,0 +1,181 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s + +; Test known bits refinements for pattern: a * (b - c) + c * d +; where a > 0, c > 0, b > 0, d > 0, and b > c. +; This pattern is a generalization of lerp and it appears frequently in graphics operations. + +define i32 @test_clamp(i8 %a, i8 %c, i8 %d) { +; CHECK-LABEL: define i32 @test_clamp( +; CHECK-SAME: i8 [[A:%.*]], i8 [[C:%.*]], i8 [[D:%.*]]) { +; CHECK-NEXT: [[A32:%.*]] = zext i8 [[A]] to i32 +; CHECK-NEXT: [[C32:%.*]] = zext i8 [[C]] to i32 +; CHECK-NEXT: [[D32:%.*]] = zext i8 [[D]] to i32 +; CHECK-NEXT: [[SUB:%.*]] = xor i32 [[C32]], 255 +; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]] +; CHECK-NEXT: [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %a32 = zext i8 %a to i32 + %c32 = zext i8 %c to i32 + %d32 = zext i8 %d to i32 + %sub = sub i32 255, %c32 + %mul1 = mul i32 %a32, %sub + %mul2 = mul i32 %c32, %d32 + %add = add i32 %mul1, %mul2 + %cmp = icmp ugt i32 %add, 65535 + %result = select i1 %cmp, i32 65535, i32 %add + ret i32 %result +} + +define i1 @test_trunc_cmp(i8 %a, i8 %c, i8 %d) { +; CHECK-LABEL: define i1 @test_trunc_cmp( +; CHECK-SAME: i8 [[A:%.*]], i8 [[C:%.*]], i8 [[D:%.*]]) { +; CHECK-NEXT: [[A32:%.*]] = zext i8 [[A]] to i32 +; CHECK-NEXT: [[C32:%.*]] = zext i8 [[C]] to i32 +; CHECK-NEXT: [[D32:%.*]] = zext i8 [[D]] to i32 +; CHECK-NEXT: [[SUB:%.*]] = xor i32 [[C32]], 255 +; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]] +; CHECK-NEXT: [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234 +; CHECK-NEXT: ret i1 [[CMP]] +; + %a32 = zext i8 %a to i32 + %c32 = zext i8 %c to i32 + %d32 = zext i8 %d to i32 + %sub = sub i32 255, %c32 + %mul1 = mul i32 %a32, %sub + %mul2 = mul i32 %c32, %d32 + %add = add i32 %mul1, %mul2 + %trunc = trunc i32 %add to i16 + %cmp = icmp eq i16 %trunc, 1234 + ret i1 %cmp +} + +define i1 @test_trunc_cmp_xor(i8 %a, i8 %c, i8 %d) { +; CHECK-LABEL: define i1 @test_trunc_cmp_xor( +; CHECK-SAME: i8 [[A:%.*]], i8 [[C:%.*]], i8 [[D:%.*]]) { +; CHECK-NEXT: [[A32:%.*]] = zext i8 [[A]] to i32 +; CHECK-NEXT: [[C32:%.*]] = zext i8 [[C]] to i32 +; CHECK-NEXT: [[D32:%.*]] = zext i8 [[D]] to i32 +; CHECK-NEXT: [[SUB:%.*]] = xor i32 [[C32]], 255 +; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]] +; CHECK-NEXT: [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234 +; CHECK-NEXT: ret i1 [[CMP]] +; + %a32 = zext i8 %a to i32 + %c32 = zext i8 %c to i32 + %d32 = zext i8 %d to i32 + %sub = xor i32 255, %c32 + %mul1 = mul i32 %a32, %sub + %mul2 = mul i32 %c32, %d32 + %add = add i32 %mul1, %mul2 + %trunc = trunc i32 %add to i16 + %cmp = icmp eq i16 %trunc, 1234 + ret i1 %cmp +} + +define i1 @test_trunc_cmp_arbitrary_b(i8 %a, i8 %b, i8 %c, i8 %d) { +; CHECK-LABEL: define i1 @test_trunc_cmp_arbitrary_b( +; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]], i8 [[D:%.*]]) { +; CHECK-NEXT: [[A32:%.*]] = zext i8 [[A]] to i32 +; CHECK-NEXT: [[B32:%.*]] = zext i8 [[B]] to i32 +; CHECK-NEXT: [[C32:%.*]] = zext i8 [[C]] to i32 +; CHECK-NEXT: [[D32:%.*]] = zext i8 [[D]] to 
i32 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 [[B32]], [[C32]] +; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]] +; CHECK-NEXT: [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234 +; CHECK-NEXT: ret i1 [[CMP]] +; + %a32 = zext i8 %a to i32 + %b32 = zext i8 %b to i32 + %c32 = zext i8 %c to i32 + %d32 = zext i8 %d to i32 + %sub = sub nsw nuw i32 %b32, %c32 + %mul1 = mul i32 %a32, %sub + %mul2 = mul i32 %c32, %d32 + %add = add i32 %mul1, %mul2 + %trunc = trunc i32 %add to i16 + %cmp = icmp eq i16 %trunc, 1234 + ret i1 %cmp +} + + +define i1 @test_trunc_cmp_no_a(i8 %b, i8 %c, i8 %d) { +; CHECK-LABEL: define i1 @test_trunc_cmp_no_a( +; CHECK-SAME: i8 [[B:%.*]], i8 [[C:%.*]], i8 [[D:%.*]]) { +; CHECK-NEXT: [[B32:%.*]] = zext i8 [[B]] to i32 +; CHECK-NEXT: [[C32:%.*]] = zext i8 [[C]] to i32 +; CHECK-NEXT: [[D32:%.*]] = zext i8 [[D]] to i32 +; CHECK-NEXT: [[MUL1:%.*]] = sub nuw nsw i32 [[B32]], [[C32]] +; CHECK-NEXT: [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234 +; CHECK-NEXT: ret i1 [[CMP]] +; + %b32 = zext i8 %b to i32 + %c32 = zext i8 %c to i32 + %d32 = zext i8 %d to i32 + %sub = sub nuw i32 %b32, %c32 + %mul2 = mul i32 %c32, %d32 + %add = add i32 %sub, %mul2 + %trunc = trunc i32 %add to i16 + %cmp = icmp eq i16 %trunc, 1234 + ret i1 %cmp +} + +define i1 @test_trunc_cmp_no_d(i8 %a, i8 %b, i8 %c) { +; CHECK-LABEL: define i1 @test_trunc_cmp_no_d( +; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) { +; CHECK-NEXT: [[A32:%.*]] = zext i8 [[A]] to i32 +; CHECK-NEXT: [[B32:%.*]] = zext i8 [[B]] to i32 +; CHECK-NEXT: [[C32:%.*]] = zext i8 [[C]] to i32 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 [[B32]], [[C32]] +; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[C32]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[ADD]], 1234 +; CHECK-NEXT: ret i1 [[CMP]] +; + %a32 = zext i8 %a to i32 + %b32 = zext i8 %b to i32 + %c32 = zext i8 %c to i32 + %sub = sub nsw nuw i32 %b32, %c32 + %mul1 = mul i32 %a32, %sub + %add = add i32 %mul1, %c32 + %trunc = trunc i32 %add to i16 + %cmp = icmp eq i16 %trunc, 1234 + ret i1 %cmp +} + +define i1 @test_trunc_cmp_xor_negative(i8 %a, i8 %c, i8 %d) { +; CHECK-LABEL: define i1 @test_trunc_cmp_xor_negative( +; CHECK-SAME: i8 [[A:%.*]], i8 [[C:%.*]], i8 [[D:%.*]]) { +; CHECK-NEXT: [[A32:%.*]] = zext i8 [[A]] to i32 +; CHECK-NEXT: [[C32:%.*]] = zext i8 [[C]] to i32 +; CHECK-NEXT: [[D32:%.*]] = zext i8 [[D]] to i32 +; CHECK-NEXT: [[SUB:%.*]] = xor i32 [[C32]], 234 +; CHECK-NEXT: [[MUL1:%.*]] = mul nuw nsw i32 [[SUB]], [[A32]] +; CHECK-NEXT: [[MUL2:%.*]] = mul nuw nsw i32 [[C32]], [[D32]] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[MUL1]], [[MUL2]] +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[ADD]] to i16 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[TRUNC]], 1234 +; CHECK-NEXT: ret i1 [[CMP]] +; + %a32 = zext i8 %a to i32 + %c32 = zext i8 %c to i32 + %d32 = zext i8 %d to i32 + %sub = xor i32 234, %c32 + %mul1 = mul i32 %a32, %sub + %mul2 = mul i32 %c32, %d32 + %add = add i32 %mul1, %mul2 + ; We should keep the trunc in this case + %trunc = trunc i32 %add to i16 + %cmp = icmp eq i16 %trunc, 1234 + ret i1 %cmp +} diff --git a/llvm/test/Transforms/InstCombine/sink-dereferenceable-assume.ll b/llvm/test/Transforms/InstCombine/sink-dereferenceable-assume.ll new file mode 
100644 index 0000000..9531323 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/sink-dereferenceable-assume.ll @@ -0,0 +1,31 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -p instcombine -S %s | FileCheck %s + +define i64 @test_sink_with_dereferenceable_assume(ptr %p, ptr %q, i1 %cond) { +; CHECK-LABEL: define i64 @test_sink_with_dereferenceable_assume( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[COND:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[Q_INT:%.*]] = ptrtoint ptr [[Q]] to i64 +; CHECK-NEXT: [[P_INT:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[Q_INT]], [[P_INT]] +; CHECK-NEXT: ret i64 [[DIFF]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: ret i64 0 +; +entry: + %p_int = ptrtoint ptr %p to i64 + %q_int = ptrtoint ptr %q to i64 + %diff = sub i64 %q_int, %p_int + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %p, i64 %diff) ] + br i1 %cond, label %then, label %else + +then: + ret i64 %diff + +else: + ret i64 0 +} + +declare void @llvm.assume(i1 noundef) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll index 3c83c01..7e58d9d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll @@ -59,13 +59,13 @@ define float @fmaxnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) ; CHECK-NEXT: [[TMP8]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD2]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8 -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]] ; CHECK-NEXT: [[TMP18:%.*]] = freeze <4 x i1> [[TMP3]] ; CHECK-NEXT: [[TMP15:%.*]] = freeze <4 x i1> [[TMP4]] ; CHECK-NEXT: [[TMP5:%.*]] = or <4 x i1> [[TMP18]], [[TMP15]] ; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]]) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP6]], [[TMP9]] ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -112,27 +112,84 @@ exit: ret float %max.next } +; TODO: Could fold pairs of `fcmp uno` together. 
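; For example (an illustrative sketch, not output copied from the checks
; below; %a, %b, %nan.a, %nan.b and %either are placeholder names): the
; pairwise NaN tests emitted in the vector body,
;   %nan.a  = fcmp uno <4 x float> %a, %a
;   %nan.b  = fcmp uno <4 x float> %b, %b
;   %either = or <4 x i1> %nan.a, %nan.b
; could be folded into a single unordered compare of the two loaded values,
;   %either = fcmp uno <4 x float> %a, %b
; since `fcmp uno` is true iff at least one operand is NaN, halving the
; number of compares.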
define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { ; CHECK-LABEL: define float @test_fmax_and_fmin( ; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 -; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i32 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i32 4 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI2]], <4 x float> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP5]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI3]], <4 x float> [[WIDE_LOAD4]]) +; CHECK-NEXT: [[TMP6]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD5]]) +; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD6]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8 +; CHECK-NEXT: [[TMP8:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD5]], [[WIDE_LOAD5]] +; CHECK-NEXT: [[TMP9:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD6]] +; CHECK-NEXT: [[TMP14:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP15:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP12:%.*]] = or <4 x i1> [[TMP8]], [[TMP14]] +; CHECK-NEXT: [[TMP13:%.*]] = or <4 x i1> [[TMP9]], [[TMP15]] +; CHECK-NEXT: [[TMP16:%.*]] = freeze <4 x i1> [[TMP12]] +; CHECK-NEXT: [[TMP17:%.*]] = freeze <4 x i1> [[TMP13]] +; CHECK-NEXT: [[TMP18:%.*]] = or <4 x i1> [[TMP16]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> 
[[TMP18]]) +; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[TMP20:%.*]] = or i1 [[TMP19]], [[TMP21]] +; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP23:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP6]] +; CHECK-NEXT: [[TMP24:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI1]], <4 x float> [[TMP7]] +; CHECK-NEXT: [[TMP25:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI2]], <4 x float> [[TMP4]] +; CHECK-NEXT: [[TMP26:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI3]], <4 x float> [[TMP5]] +; CHECK-NEXT: [[TMP27:%.*]] = select i1 [[TMP19]], i64 [[IV]], i64 [[N_VEC]] +; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP23]], <4 x float> [[TMP24]]) +; CHECK-NEXT: [[TMP28:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[RDX_MINMAX]]) +; CHECK-NEXT: [[RDX_MINMAX9:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP25]], <4 x float> [[TMP26]]) +; CHECK-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[RDX_MINMAX9]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: [[TMP30:%.*]] = xor i1 [[TMP19]], true +; CHECK-NEXT: [[TMP31:%.*]] = and i1 [[CMP_N]], [[TMP30]] +; CHECK-NEXT: br i1 [[TMP31]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP27]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP28]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX8:%.*]] = phi float [ [[TMP29]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ [[BC_MERGE_RDX8]], %[[SCALAR_PH]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV1]] +; CHECK-NEXT: [[GEP_SRC_3:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV1]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_2]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_3]], align 4 ; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) ; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ], [ [[TMP29]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ], [ [[TMP28]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] ; CHECK-NEXT: ret float [[SUB]] ; diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll index 711a9cd..1cc4c152 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll @@ -59,13 +59,13 @@ define float @fminnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) ; CHECK-NEXT: [[TMP8]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD2]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8 -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]] ; CHECK-NEXT: [[TMP15:%.*]] = freeze <4 x i1> [[TMP3]] ; CHECK-NEXT: [[TMP18:%.*]] = freeze <4 x i1> [[TMP4]] ; CHECK-NEXT: [[TMP5:%.*]] = or <4 x i1> [[TMP15]], [[TMP18]] ; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]]) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP6]], [[TMP9]] ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll deleted file mode 100644 index c058789..0000000 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll +++ /dev/null @@ -1,511 +0,0 @@ -; REQUIRES: asserts - -; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ -; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s - -define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]] -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[SMAX:%.+]]> = call llvm.smax(ir<[[LD1]]>, 
ir<[[LD2]]>) -; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMAX]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %gep3 = getelementptr inbounds i32, ptr %c, i64 %iv - %1 = load i32, ptr %gep3, align 4 - %. = tail call i32 @llvm.smax.i32(i32 %0, i32 %1) - %gep11 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %., ptr %gep11, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[SMIN:%.+]]> = call llvm.smin(ir<[[LD1]]>, ir<[[LD2]]>) -; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMIN]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, 
align 4 - %gep3 = getelementptr inbounds i32, ptr %c, i64 %iv - %1 = load i32, ptr %gep3, align 4 - %. = tail call i32 @llvm.smin.i32(i32 %0, i32 %1) - %gep11 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %., ptr %gep11, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[UMAX:%.+]]> = call llvm.umax(ir<[[LD1]]>, ir<[[LD2]]>) -; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMAX]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %gep3 = getelementptr inbounds i32, ptr %c, i64 %iv - %1 = load i32, ptr %gep3, align 4 - %. 
= tail call i32 @llvm.umax.i32(i32 %0, i32 %1) - %gep11 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %., ptr %gep11, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]] -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[UMIN:%.+]]> = call llvm.umin(ir<[[LD1]]>, ir<[[LD2]]>) -; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMIN]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %gep3 = getelementptr inbounds i32, ptr %c, i64 %iv - %1 = load i32, ptr %gep3, align 4 - %. 
= tail call i32 @llvm.umin.i32(i32 %0, i32 %1) - %gep11 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %., ptr %gep11, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_ctlz(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[CTLZ:%.+]]> = call llvm.ctlz(ir<[[LD1]]>, ir<true>) -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTLZ]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %1 = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 %0, i1 true) - %gep3 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %1, ptr %gep3, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_cttz(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: 
EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[CTTZ:%.+]]> = call llvm.cttz(ir<[[LD1]]>, ir<true>) -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTTZ]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %1 = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 %0, i1 true) - %gep3 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %1, ptr %gep3, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_lrint(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[FPEXT:%.+]]> = fpext ir<[[LD1]]> to double -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[LRINT:%.+]]> = call llvm.lrint(ir<[[FPEXT]]>) -; IF-EVL-NEXT: WIDEN-CAST ir<[[TRUNC:%.+]]> = trunc ir<[[LRINT]]> to i32 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; 
IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds float, ptr %b, i64 %iv - %0 = load float, ptr %gep, align 4 - %conv2 = fpext float %0 to double - %1 = tail call i64 @llvm.lrint.i64.f64(double %conv2) - %conv3 = trunc i64 %1 to i32 - %gep5 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %conv3, ptr %gep5, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_llrint(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[FPEXT:%.+]]> = fpext ir<[[LD1]]> to double -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[LLRINT:%.+]]> = call llvm.llrint(ir<[[FPEXT]]>) -; IF-EVL-NEXT: WIDEN-CAST ir<[[TRUNC:%.+]]> = trunc ir<[[LLRINT]]> to i32 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds float, ptr %b, i64 %iv - %0 = load float, ptr %gep, align 4 - %conv2 = fpext float %0 to double - %1 = tail call i64 @llvm.llrint.i64.f64(double %conv2) - %conv3 = trunc i64 %1 to i32 - %gep5 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %conv3, ptr %gep5, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_abs(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: 
Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[ABS:%.+]]> = call llvm.abs(ir<[[LD1]]>, ir<true>) -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ABS]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %cond = tail call i32 @llvm.abs.i32(i32 %0, i1 true) - %gep9 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %cond, ptr %gep9, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -declare i32 @llvm.smax.i32(i32, i32) -declare i32 @llvm.smin.i32(i32, i32) -declare i32 @llvm.umax.i32(i32, i32) -declare i32 @llvm.umin.i32(i32, i32) -declare i32 @llvm.ctlz.i32(i32, i1 immarg) -declare i32 @llvm.cttz.i32(i32, i1 immarg) -declare i64 @llvm.lrint.i64.f64(double) -declare i64 @llvm.llrint.i64.f64(double) -declare i32 @llvm.abs.i32(i32, i1 immarg) diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll deleted file mode 100644 index 8d3fe48..0000000 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll +++ /dev/null @@ -1,576 +0,0 @@ -; REQUIRES: asserts -; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ -; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s - -define void @vp_sext(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: 
Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[SEXT:%.+]]> = sext ir<[[LD1]]> to i64 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SEXT]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } -; IF-EVL-NEXT: Successor(s): middle.block - - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %conv2 = sext i32 %0 to i64 - %gep4 = getelementptr inbounds i64, ptr %a, i64 %iv - store i64 %conv2, ptr %gep4, align 8 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_zext(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[ZEXT:%.+]]> = zext ir<[[LD1]]> to i64 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ZEXT]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT 
vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %conv2 = zext i32 %0 to i64 - %gep4 = getelementptr inbounds i64, ptr %a, i64 %iv - store i64 %conv2, ptr %gep4, align 8 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_trunc(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[TRUNC:%.+]]> = trunc ir<[[LD1]]> to i16 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %conv2 = trunc i32 %0 to i16 - %gep4 = getelementptr inbounds i16, ptr %a, i64 %iv - store i16 %conv2, ptr %gep4, align 2 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_fpext(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; 
IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[FPEXT:%.+]]> = fpext ir<[[LD1]]> to double -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPEXT]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds float, ptr %b, i64 %iv - %0 = load float, ptr %gep, align 4 - %conv2 = fpext float %0 to double - %gep4 = getelementptr inbounds double, ptr %a, i64 %iv - store double %conv2, ptr %gep4, align 8 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[FPTRUNC:%.+]]> = fptrunc ir<[[LD1]]> to float -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTRUNC]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 
-; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds double, ptr %b, i64 %iv - %0 = load double, ptr %gep, align 8 - %conv2 = fptrunc double %0 to float - %gep4 = getelementptr inbounds float, ptr %a, i64 %iv - store float %conv2, ptr %gep4, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_sitofp(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[SITOFP:%.+]]> = sitofp ir<[[LD1]]> to float -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SITOFP]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %conv2 = sitofp i32 %0 to float - %gep4 = getelementptr inbounds float, ptr %a, i64 %iv - store float %conv2, ptr %gep4, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_uitofp(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count 
-; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]] -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[UITOFP:%.+]]> = uitofp ir<[[LD1]]> to float -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[UITOFP]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %gep, align 4 - %conv2 = uitofp i32 %0 to float - %gep4 = getelementptr inbounds float, ptr %a, i64 %iv - store float %conv2, ptr %gep4, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_fptosi(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[FPTOSI:%.+]]> = fptosi ir<[[LD1]]> to i32 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOSI]]>, vp<[[EVL]]> -; IF-EVL-NEXT: 
EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds float, ptr %b, i64 %iv - %0 = load float, ptr %gep, align 4 - %conv2 = fptosi float %0 to i32 - %gep4 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %conv2, ptr %gep4, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_fptoui(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[FPTOUI:%.+]]> = fptoui ir<[[LD1]]> to i32 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOUI]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] - %gep = getelementptr inbounds float, ptr %b, i64 %iv - %0 = load float, ptr %gep, align 4 - %conv2 = fptoui float %0 to i32 - %gep4 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %conv2, ptr %gep4, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI -; -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF -; IF-EVL-NEXT: Live-in 
vp<[[VTC:%[0-9]+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<%N> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> -; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[INTTOPTR:%.+]]> = inttoptr ir<[[LD1]]> to ptr -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> -; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[INTTOPTR]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]> -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } - -entry: - br label %loop - -loop: - %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] - %gep = getelementptr inbounds i64, ptr %b, i64 %iv - %0 = load i64, ptr %gep, align 8 - %1 = inttoptr i64 %0 to ptr - %gep2 = getelementptr inbounds ptr, ptr %a, i64 %iv - store ptr %1, ptr %gep2, align 8 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} - -define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) { -; IF-EVL: VPlan 'Initial VPlan for VF={1},UF>=1' -; IF-EVL-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI - -; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2},UF={1}' { -; IF-EVL-NEXT: Live-in vp<[[VFUF:%.+]]> = VF * UF -; IF-EVL-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count -; IF-EVL-NEXT: Live-in ir<[[N:%.+]]> = original trip-count - -; IF-EVL: vector.ph: -; IF-EVL-NEXT: Successor(s): vector loop - -; IF-EVL: <x1> vector loop: { -; IF-EVL-NEXT: vector.body: -; IF-EVL-NEXT: EMIT vp<[[INDEX:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]> -; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[INDEX_EVL:%.+]]> = phi ir<0>, vp<[[INDEX_EVL_NEXT:%.+]]> -; IF-EVL-NEXT: ir<[[IV:%.+]]> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ] -; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]> -; IF-EVL-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[INDEX_EVL]]>, ir<1>, vp<[[EVL]]> -; IF-EVL-NEXT: WIDEN-GEP Inv[Var] ir<[[GEP:%.+]]> = getelementptr inbounds ir<%b>, ir<[[IV]]> -; IF-EVL-NEXT: WIDEN-CAST ir<[[PTRTOINT:%.+]]> = ptrtoint ir<[[GEP]]> to i64 -; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[SCALAR_STEPS]]> -; IF-EVL-NEXT: vp<[[VECTOR_PTR:%.+]]> = vector-pointer ir<[[GEP2]]> -; IF-EVL-NEXT: WIDEN vp.store vp<[[VECTOR_PTR]]>, 
ir<[[PTRTOINT]]>, vp<[[EVL]]> -; IF-EVL-NEXT: EMIT-SCALAR vp<[[ZEXT:%.+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[INDEX_EVL_NEXT]]> = add vp<[[ZEXT]]>, vp<[[INDEX_EVL]]> -; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[ZEXT]]> -; IF-EVL-NEXT: EMIT vp<[[INDEX_NEXT]]> = add vp<[[INDEX]]>, vp<[[VFUF]]> -; IF-EVL-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]> -; IF-EVL-NEXT: No successors -; IF-EVL-NEXT: } -; IF-EVL-NEXT: Successor(s): middle.block -entry: - br label %loop - -loop: - %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] - %gep = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = ptrtoint ptr %gep to i64 - %gep2 = getelementptr inbounds i64, ptr %a, i64 %iv - store i64 %0, ptr %gep2, align 8 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop - -exit: - ret void -} diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll index af648df..01fab87 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll @@ -59,13 +59,13 @@ define float @fmaxnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) ; CHECK-NEXT: [[TMP8]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD2]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8 -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]] ; CHECK-NEXT: [[TMP15:%.*]] = freeze <4 x i1> [[TMP3]] ; CHECK-NEXT: [[TMP18:%.*]] = freeze <4 x i1> [[TMP4]] ; CHECK-NEXT: [[TMP5:%.*]] = or <4 x i1> [[TMP15]], [[TMP18]] ; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]]) +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP6]], [[TMP9]] ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -116,23 +116,79 @@ define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { ; CHECK-LABEL: define float @test_fmax_and_fmin( ; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = 
phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 -; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i32 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i32 4 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI2]], <4 x float> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP5]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI3]], <4 x float> [[WIDE_LOAD4]]) +; CHECK-NEXT: [[TMP6]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD5]]) +; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD6]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8 +; CHECK-NEXT: [[TMP8:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD5]], [[WIDE_LOAD5]] +; CHECK-NEXT: [[TMP9:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD6]] +; CHECK-NEXT: [[TMP14:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP15:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP12:%.*]] = or <4 x i1> [[TMP8]], [[TMP14]] +; CHECK-NEXT: [[TMP13:%.*]] = or <4 x i1> [[TMP9]], [[TMP15]] +; CHECK-NEXT: [[TMP16:%.*]] = freeze <4 x i1> [[TMP12]] +; CHECK-NEXT: [[TMP17:%.*]] = freeze <4 x i1> [[TMP13]] +; CHECK-NEXT: [[TMP18:%.*]] = or <4 x i1> [[TMP16]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP18]]) +; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[TMP20:%.*]] = or i1 [[TMP19]], [[TMP21]] +; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP23:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP6]] +; CHECK-NEXT: [[TMP24:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI1]], <4 x float> [[TMP7]] +; CHECK-NEXT: [[TMP25:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI2]], <4 x float> [[TMP4]] +; CHECK-NEXT: [[TMP26:%.*]] = select i1 [[TMP19]], <4 x float> [[VEC_PHI3]], <4 x float> [[TMP5]] +; CHECK-NEXT: [[TMP27:%.*]] = select i1 [[TMP19]], i64 [[IV]], i64 [[N_VEC]] +; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[TMP23]], <4 x float> [[TMP24]]) +; CHECK-NEXT: [[TMP28:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[RDX_MINMAX]]) +; CHECK-NEXT: [[RDX_MINMAX9:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[TMP25]], <4 x float> [[TMP26]]) +; CHECK-NEXT: [[TMP29:%.*]] = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> 
[[RDX_MINMAX9]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: [[TMP30:%.*]] = xor i1 [[TMP19]], true +; CHECK-NEXT: [[TMP31:%.*]] = and i1 [[CMP_N]], [[TMP30]] +; CHECK-NEXT: br i1 [[TMP31]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP27]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP28]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX8:%.*]] = phi float [ [[TMP29]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ [[BC_MERGE_RDX8]], %[[SCALAR_PH]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV1]] +; CHECK-NEXT: [[GEP_SRC_3:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV1]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_2]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_3]], align 4 ; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) ; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ], [ [[TMP29]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ], [ [[TMP28]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] ; CHECK-NEXT: ret float [[SUB]] ; diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll index 242df1f..e028bec 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll @@ -205,10 +205,10 @@ define float @fmaxnum_1(ptr %src, i64 %n) { ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[VEC_PHI]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP10:%.*]] = freeze <4 x i1> [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP10]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP3]], [[TMP5]] ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -270,10 
+270,10 @@ define float @fmaxnum_2(ptr %src, i64 %n) { ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP10:%.*]] = freeze <4 x i1> [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP10]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP3]], [[TMP5]] ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -337,10 +337,10 @@ define float @fmaxnum_induction_starts_at_10(ptr %src, i64 %n) { ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[TMP3]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[VEC_PHI]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP5:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP12:%.*]] = freeze <4 x i1> [[TMP5]] ; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP12]]) +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[TMP4]] ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -405,10 +405,10 @@ define float @fmaxnum_induction_starts_at_value(ptr %src, i64 %start, i64 %n) { ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[TMP3]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[VEC_PHI]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP5:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP12:%.*]] = freeze <4 x i1> [[TMP5]] ; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP12]]) +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[TMP4]] ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -680,23 +680,62 @@ define float @test_fmax_and_fmax(ptr %src.0, ptr %src.1, i64 %n) { ; CHECK-LABEL: define float @test_fmax_and_fmax( ; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { ; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: 
[[VECTOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 -; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[TMP2]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI1]], <4 x float> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP3]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD2]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 +; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD2]] +; CHECK-NEXT: [[TMP7:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = or <4 x i1> [[TMP4]], [[TMP7]] +; CHECK-NEXT: [[TMP8:%.*]] = freeze <4 x i1> [[TMP6]] +; CHECK-NEXT: [[TMP9:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP8]]) +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP11]] +; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP13:%.*]] = select i1 [[TMP9]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP3]] +; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP9]], <4 x float> [[VEC_PHI1]], <4 x float> [[TMP2]] +; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP9]], i64 [[IV]], i64 [[N_VEC]] +; CHECK-NEXT: [[TMP16:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[TMP13]]) +; CHECK-NEXT: [[TMP17:%.*]] = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[TMP14]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP9]], true +; CHECK-NEXT: [[TMP19:%.*]] = and i1 [[CMP_N]], [[TMP18]] +; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP15]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP16]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX3:%.*]] = phi float [ [[TMP17]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ [[BC_MERGE_RDX3]], %[[SCALAR_PH]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV1]] +; CHECK-NEXT: [[GEP_SRC_3:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV1]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_2]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_3]], align 4 ; CHECK-NEXT: [[MAX_NEXT]] 
= tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) ; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] -; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ], [ [[TMP17]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] ; CHECK-NEXT: ret float [[SUB]] ; diff --git a/llvm/test/Transforms/LoopVectorize/fmin-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/fmin-without-fast-math-flags.ll index 7f65306..368553d 100644 --- a/llvm/test/Transforms/LoopVectorize/fmin-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/fmin-without-fast-math-flags.ll @@ -205,10 +205,10 @@ define float @fminnum_1(ptr %src, i64 %n) { ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[VEC_PHI]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP10:%.*]] = freeze <4 x i1> [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP10]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP3]], [[TMP5]] ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -270,10 +270,10 @@ define float @fminnum_2(ptr %src, i64 %n) { ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP10:%.*]] = freeze <4 x i1> [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP10]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP3]], [[TMP5]] ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll index 338d925..33e3e83 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll @@ -49,10 +49,10 @@ entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %0, i64 256) ] %start.ptr = load ptr, ptr %first, align 8 %1 = load i64, ptr %first, align 8 - %coerce.val.pi.i = add i64 %1, 256 - %coerce.val.ip 
= inttoptr i64 %coerce.val.pi.i to ptr - %cmp.not6.i.i = icmp eq ptr %start.ptr, %coerce.val.ip - br i1 %cmp.not6.i.i, label %return, label %loop.ph + %coerce.val.p = add i64 %1, 256 + %coerce.val.ip = inttoptr i64 %coerce.val.p to ptr + %ec6. = icmp eq ptr %start.ptr, %coerce.val.ip + br i1 %ec6., label %return, label %loop.ph loop.ph: %2 = load i16, ptr %s.addr, align 2 @@ -61,13 +61,13 @@ loop.ph: loop.header: %ptr.iv = phi ptr [ %start.ptr, %loop.ph ], [ %ptr.iv.next, %loop.latch ] %3 = load i16, ptr %ptr.iv, align 2 - %cmp2.i.i = icmp eq i16 %3, %2 - br i1 %cmp2.i.i, label %return, label %loop.latch + %cmp2. = icmp eq i16 %3, %2 + br i1 %cmp2., label %return, label %loop.latch loop.latch: %ptr.iv.next = getelementptr inbounds nuw i8, ptr %ptr.iv, i64 2 - %cmp.not.i.i = icmp eq ptr %ptr.iv.next, %coerce.val.ip - br i1 %cmp.not.i.i, label %return, label %loop.header + %ec. = icmp eq ptr %ptr.iv.next, %coerce.val.ip + br i1 %ec., label %return, label %loop.header return: %merge = phi ptr [ %start.ptr, %entry ], [ %coerce.val.ip, %loop.latch ], [ %ptr.iv, %loop.header ] @@ -103,10 +103,10 @@ entry: %0 = load ptr, ptr %first, align 8 %start.ptr = load ptr, ptr %first, align 8 %1 = load i64, ptr %first, align 8 - %coerce.val.pi.i = add i64 %1, 256 - %coerce.val.ip = inttoptr i64 %coerce.val.pi.i to ptr - %cmp.not6.i.i = icmp eq ptr %start.ptr, %coerce.val.ip - br i1 %cmp.not6.i.i, label %return, label %loop.ph + %coerce.val.p = add i64 %1, 256 + %coerce.val.ip = inttoptr i64 %coerce.val.p to ptr + %ec6. = icmp eq ptr %start.ptr, %coerce.val.ip + br i1 %ec6., label %return, label %loop.ph loop.ph: %2 = load i16, ptr %s.addr, align 2 @@ -115,13 +115,13 @@ loop.ph: loop.header: %ptr.iv = phi ptr [ %start.ptr, %loop.ph ], [ %ptr.iv.next, %loop.latch ] %3 = load i16, ptr %ptr.iv, align 2 - %cmp2.i.i = icmp eq i16 %3, %2 - br i1 %cmp2.i.i, label %return, label %loop.latch + %cmp2. = icmp eq i16 %3, %2 + br i1 %cmp2., label %return, label %loop.latch loop.latch: %ptr.iv.next = getelementptr inbounds nuw i8, ptr %ptr.iv, i64 2 - %cmp.not.i.i = icmp eq ptr %ptr.iv.next, %coerce.val.ip - br i1 %cmp.not.i.i, label %return, label %loop.header + %ec. 
= icmp eq ptr %ptr.iv.next, %coerce.val.ip + br i1 %ec., label %return, label %loop.header return: %merge = phi ptr [ %start.ptr, %entry ], [ %coerce.val.ip, %loop.latch ], [ %ptr.iv, %loop.header ] @@ -129,9 +129,118 @@ return: ret i64 %res } +define ptr @std_find_caller(ptr noundef %first, ptr noundef %last) { +; CHECK-LABEL: define noundef ptr @std_find_caller( +; CHECK-SAME: ptr noundef [[FIRST:%.*]], ptr noundef [[LAST:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[FIRST]], i64 2) ] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[LAST]], i64 2) ] +; CHECK-NEXT: [[PRE_I:%.*]] = icmp eq ptr [[FIRST]], [[LAST]] +; CHECK-NEXT: br i1 [[PRE_I]], label %[[STD_FIND_GENERIC_IMPL_EXIT:.*]], label %[[LOOP_HEADER_I_PREHEADER:.*]] +; CHECK: [[LOOP_HEADER_I_PREHEADER]]: +; CHECK-NEXT: [[LAST2:%.*]] = ptrtoint ptr [[LAST]] to i64 +; CHECK-NEXT: [[FIRST3:%.*]] = ptrtoint ptr [[FIRST]] to i64 +; CHECK-NEXT: [[LAST_I64:%.*]] = ptrtoint ptr [[LAST]] to i64 +; CHECK-NEXT: [[FIRST1:%.*]] = ptrtoint ptr [[FIRST]] to i64 +; CHECK-NEXT: [[PTR_SUB:%.*]] = sub i64 [[LAST_I64]], [[FIRST1]] +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[FIRST]], i64 [[PTR_SUB]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[LAST2]], -2 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[FIRST3]] +; CHECK-NEXT: [[TMP2:%.*]] = lshr exact i64 [[TMP1]], 1 +; CHECK-NEXT: [[TMP3:%.*]] = add nuw i64 [[TMP2]], 1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP3]], 3 +; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP1]], 6 +; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[TMP4]], 6 +; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label %[[LOOP_HEADER_I_PROL_LOOPEXIT:.*]], label %[[LOOP_HEADER_I_PROL:.*]] +; CHECK: [[LOOP_HEADER_I_PROL]]: +; CHECK-NEXT: [[PTR_IV_I_PROL:%.*]] = phi ptr [ [[PTR_IV_NEXT_I_PROL:%.*]], %[[LOOP_LATCH_I_PROL:.*]] ], [ [[FIRST]], %[[LOOP_HEADER_I_PREHEADER]] ] +; CHECK-NEXT: [[PROL_ITER:%.*]] = phi i64 [ [[PROL_ITER_NEXT:%.*]], %[[LOOP_LATCH_I_PROL]] ], [ 0, %[[LOOP_HEADER_I_PREHEADER]] ] +; CHECK-NEXT: [[L_I_PROL:%.*]] = load i16, ptr [[PTR_IV_I_PROL]], align 2 +; CHECK-NEXT: [[C_1_I_PROL:%.*]] = icmp eq i16 [[L_I_PROL]], 1 +; CHECK-NEXT: br i1 [[C_1_I_PROL]], label %[[STD_FIND_GENERIC_IMPL_EXIT]], label %[[LOOP_LATCH_I_PROL]] +; CHECK: [[LOOP_LATCH_I_PROL]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I_PROL]] = getelementptr inbounds nuw i8, ptr [[PTR_IV_I_PROL]], i64 2 +; CHECK-NEXT: [[PROL_ITER_NEXT]] = add i64 [[PROL_ITER]], 1 +; CHECK-NEXT: [[PROL_ITER_CMP_NOT:%.*]] = icmp eq i64 [[PROL_ITER_NEXT]], [[XTRAITER]] +; CHECK-NEXT: br i1 [[PROL_ITER_CMP_NOT]], label %[[LOOP_HEADER_I_PROL_LOOPEXIT]], label %[[LOOP_HEADER_I_PROL]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[LOOP_HEADER_I_PROL_LOOPEXIT]]: +; CHECK-NEXT: [[PTR_IV_I_UNR:%.*]] = phi ptr [ [[FIRST]], %[[LOOP_HEADER_I_PREHEADER]] ], [ [[PTR_IV_NEXT_I_PROL]], %[[LOOP_LATCH_I_PROL]] ] +; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i64 [[TMP1]], 6 +; CHECK-NEXT: br i1 [[TMP5]], label %[[STD_FIND_GENERIC_IMPL_EXIT]], label %[[LOOP_HEADER_I:.*]] +; CHECK: [[LOOP_HEADER_I]]: +; CHECK-NEXT: [[PTR_IV_I:%.*]] = phi ptr [ [[PTR_IV_NEXT_I_3:%.*]], %[[LOOP_LATCH_I_3:.*]] ], [ [[PTR_IV_I_UNR]], %[[LOOP_HEADER_I_PROL_LOOPEXIT]] ] +; CHECK-NEXT: [[L_I:%.*]] = load i16, ptr [[PTR_IV_I]], align 2 +; CHECK-NEXT: [[C_1_I:%.*]] = icmp eq i16 [[L_I]], 1 +; CHECK-NEXT: br i1 [[C_1_I]], label %[[STD_FIND_GENERIC_IMPL_EXIT]], label %[[LOOP_LATCH_I:.*]] +; CHECK: [[LOOP_LATCH_I]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I:%.*]] = 
getelementptr inbounds nuw i8, ptr [[PTR_IV_I]], i64 2 +; CHECK-NEXT: [[L_I_1:%.*]] = load i16, ptr [[PTR_IV_NEXT_I]], align 2 +; CHECK-NEXT: [[C_1_I_1:%.*]] = icmp eq i16 [[L_I_1]], 1 +; CHECK-NEXT: br i1 [[C_1_I_1]], label %[[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT11:.*]], label %[[LOOP_LATCH_I_1:.*]] +; CHECK: [[LOOP_LATCH_I_1]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I_1:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV_I]], i64 4 +; CHECK-NEXT: [[L_I_2:%.*]] = load i16, ptr [[PTR_IV_NEXT_I_1]], align 2 +; CHECK-NEXT: [[C_1_I_2:%.*]] = icmp eq i16 [[L_I_2]], 1 +; CHECK-NEXT: br i1 [[C_1_I_2]], label %[[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT9:.*]], label %[[LOOP_LATCH_I_2:.*]] +; CHECK: [[LOOP_LATCH_I_2]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I_2:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV_I]], i64 6 +; CHECK-NEXT: [[L_I_3:%.*]] = load i16, ptr [[PTR_IV_NEXT_I_2]], align 2 +; CHECK-NEXT: [[C_1_I_3:%.*]] = icmp eq i16 [[L_I_3]], 1 +; CHECK-NEXT: br i1 [[C_1_I_3]], label %[[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT7:.*]], label %[[LOOP_LATCH_I_3]] +; CHECK: [[LOOP_LATCH_I_3]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I_3]] = getelementptr inbounds nuw i8, ptr [[PTR_IV_I]], i64 8 +; CHECK-NEXT: [[EC_I_3:%.*]] = icmp eq ptr [[PTR_IV_NEXT_I_3]], [[LAST]] +; CHECK-NEXT: br i1 [[EC_I_3]], label %[[STD_FIND_GENERIC_IMPL_EXIT]], label %[[LOOP_HEADER_I]] +; CHECK: [[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT7]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I_2_LE:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV_I]], i64 6 +; CHECK-NEXT: br label %[[STD_FIND_GENERIC_IMPL_EXIT]] +; CHECK: [[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT9]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I_1_LE:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV_I]], i64 4 +; CHECK-NEXT: br label %[[STD_FIND_GENERIC_IMPL_EXIT]] +; CHECK: [[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT11]]: +; CHECK-NEXT: [[PTR_IV_NEXT_I_LE:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV_I]], i64 2 +; CHECK-NEXT: br label %[[STD_FIND_GENERIC_IMPL_EXIT]] +; CHECK: [[STD_FIND_GENERIC_IMPL_EXIT]]: +; CHECK-NEXT: [[RES_I:%.*]] = phi ptr [ [[FIRST]], %[[ENTRY]] ], [ [[SCEVGEP]], %[[LOOP_HEADER_I_PROL_LOOPEXIT]] ], [ [[PTR_IV_NEXT_I_2_LE]], %[[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT7]] ], [ [[PTR_IV_NEXT_I_1_LE]], %[[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT9]] ], [ [[PTR_IV_NEXT_I_LE]], %[[STD_FIND_GENERIC_IMPL_EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT_SPLIT_LOOP_EXIT11]] ], [ [[SCEVGEP]], %[[LOOP_LATCH_I_3]] ], [ [[PTR_IV_I]], %[[LOOP_HEADER_I]] ], [ [[PTR_IV_I_PROL]], %[[LOOP_HEADER_I_PROL]] ] +; CHECK-NEXT: ret ptr [[RES_I]] +; +entry: + %last.i64 = ptrtoint ptr %last to i64 + %first.i64 = ptrtoint ptr %first to i64 + %ptr.sub = sub i64 %last.i64, %first.i64 + call void @llvm.assume(i1 true) [ "align"(ptr %first, i64 2) ] + call void @llvm.assume(i1 true) [ "align"(ptr %last, i64 2) ] + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %first, i64 %ptr.sub) ] + %call = call noundef ptr @std_find_generic_impl(ptr noundef nonnull %first, ptr noundef %last, i16 noundef signext 1) + ret ptr %call +} + +define linkonce_odr noundef ptr @std_find_generic_impl(ptr noundef %first, ptr noundef %last, i16 noundef %value) { +entry: + %pre = icmp eq ptr %first, %last + br i1 %pre, label %exit, label %loop.header + +loop.header: + %ptr.iv = phi ptr [ %ptr.iv.next, 
%loop.latch ], [ %first, %entry ]
+  %l = load i16, ptr %ptr.iv, align 2
+  %c.1 = icmp eq i16 %l, %value
+  br i1 %c.1, label %exit, label %loop.latch
+
+loop.latch:
+  %ptr.iv.next = getelementptr inbounds nuw i8, ptr %ptr.iv, i64 2
+  %ec = icmp eq ptr %ptr.iv.next, %last
+  br i1 %ec, label %exit, label %loop.header
+
+exit:
+  %res = phi ptr [ %first, %entry ], [ %ptr.iv, %loop.header ], [ %ptr.iv.next, %loop.latch ]
+  ret ptr %res
+}
+
 declare void @llvm.assume(i1 noundef)
 ;.
 ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
 ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
 ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
+; CHECK: [[META4]] = !{!"llvm.loop.unroll.disable"}
 ;.
