; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64 -mattr=+cf,+nf,+avx512f -verify-machineinstrs | FileCheck %s

define void @basic(i32 %a, ptr %b, ptr %p, ptr %q) {
; CHECK-LABEL: basic:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    cfcmovel (%rsi), %eax
; CHECK-NEXT:    cfcmovel %eax, (%rdx)
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    cfcmovneq %rax, (%rdx)
; CHECK-NEXT:    movw $2, %ax
; CHECK-NEXT:    cfcmovnew %ax, (%rcx)
; CHECK-NEXT:    retq
entry:
  %cond = icmp eq i32 %a, 0
  %0 = bitcast i1 %cond to <1 x i1>
  %1 = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr %b, i32 4, <1 x i1> %0, <1 x i32> poison)
  call void @llvm.masked.store.v1i32.p0(<1 x i32> %1, ptr %p, i32 4, <1 x i1> %0)
  %2 = xor i1 %cond, true
  %3 = bitcast i1 %2 to <1 x i1>
  call void @llvm.masked.store.v1i64.p0(<1 x i64> <i64 1>, ptr %p, i32 8, <1 x i1> %3)
  call void @llvm.masked.store.v1i16.p0(<1 x i16> <i16 2>, ptr %q, i32 8, <1 x i1> %3)
  ret void
}

define i16 @cload_passthru_zero(i16 %a, ptr %b) {
; CHECK-LABEL: cload_passthru_zero:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    testw %di, %di
; CHECK-NEXT:    cfcmovew (%rsi), %ax
; CHECK-NEXT:    retq
entry:
  %cond = icmp eq i16 %a, 0
  %0 = bitcast i1 %cond to <1 x i1>
  %1 = call <1 x i16> @llvm.masked.load.v1i16.p0(ptr %b, i32 4, <1 x i1> %0, <1 x i16> <i16 0>)
  %2 = bitcast <1 x i16> %1 to i16
  ret i16 %2
}

define i64 @cload_passthru_not_zero(i64 %a, ptr %b) {
; CHECK-LABEL: cload_passthru_not_zero:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    cfcmoveq (%rsi), %rdi, %rax
; CHECK-NEXT:    retq
entry:
  %cond = icmp eq i64 %a, 0
  %0 = bitcast i1 %cond to <1 x i1>
  %va = bitcast i64 %a to <1 x i64>
  %1 = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr %b, i32 4, <1 x i1> %0, <1 x i64> %va)
  %2 = bitcast <1 x i64> %1 to i64
  ret i64 %2
}

;; CFCMOV can use the flags produced by SUB directly.
define i64 @reduced_data_dependency(i64 %a, i64 %b, ptr %c) {
; CHECK-LABEL: reduced_data_dependency:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rdi, %rcx
; CHECK-NEXT:    subq %rsi, %rcx
; CHECK-NEXT:    cfcmovnsq (%rdx), %rdi, %rax
; CHECK-NEXT:    addq %rcx, %rax
; CHECK-NEXT:    retq
entry:
  %sub = sub i64 %a, %b
  %cond = icmp sge i64 %sub, 0
  %0 = bitcast i1 %cond to <1 x i1>
  %va = bitcast i64 %a to <1 x i64>
  %1 = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr %c, i32 4, <1 x i1> %0, <1 x i64> %va)
  %2 = bitcast <1 x i64> %1 to i64
  %3 = add i64 %2, %sub
  ret i64 %3
}

;; No need to optimize the generated assembly for cond_false/cond_true because
;; such IR should never be emitted by the middle end. It is included here only
;; to check that feeding a constant mask to the backend is legal.
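;; For reference, a sketch of the usual middle-end behavior (InstCombine folds
;; a known constant mask long before isel); the `==>` arrows are illustrative,
;; not part of the test, and %r/%pt are placeholder names:
;;   ; all-false mask: the masked load folds to its passthru operand
;;   %r = call <1 x i16> @llvm.masked.load.v1i16.p0(ptr %b, i32 4, <1 x i1> zeroinitializer, <1 x i16> <i16 0>)
;;   ==> %r = <1 x i16> <i16 0>
;;   ; all-true mask: the masked load folds to a plain load
;;   %r = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr %b, i32 4, <1 x i1> <i1 true>, <1 x i64> %pt)
;;   ==> %r = load <1 x i64>, ptr %b, align 4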
define i16 @cond_false(ptr %b) {
; CHECK-LABEL: cond_false:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    negb %al
; CHECK-NEXT:    cfcmovnew (%rdi), %ax
; CHECK-NEXT:    retq
entry:
  %0 = bitcast i1 false to <1 x i1>
  %1 = call <1 x i16> @llvm.masked.load.v1i16.p0(ptr %b, i32 4, <1 x i1> %0, <1 x i16> <i16 0>)
  %2 = bitcast <1 x i16> %1 to i16
  ret i16 %2
}

define i64 @cond_true(ptr %b) {
; CHECK-LABEL: cond_true:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movb $1, %al
; CHECK-NEXT:    negb %al
; CHECK-NEXT:    cfcmovneq (%rdi), %rax
; CHECK-NEXT:    retq
entry:
  %0 = bitcast i1 true to <1 x i1>
  %1 = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr %b, i32 4, <1 x i1> %0, <1 x i64> <i64 0>)
  %2 = bitcast <1 x i64> %1 to i64
  ret i64 %2
}

define void @no_crash(ptr %p, <4 x i1> %cond1, <4 x i1> %cond2) {
; CHECK-LABEL: no_crash:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpslld $31, %xmm1, %xmm1
; CHECK-NEXT:    vptestmd %zmm1, %zmm1, %k0
; CHECK-NEXT:    kshiftlw $12, %k0, %k0
; CHECK-NEXT:    kshiftrw $12, %k0, %k1
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm0
; CHECK-NEXT:    vptestmd %zmm0, %zmm0, %k0
; CHECK-NEXT:    kshiftlw $12, %k0, %k0
; CHECK-NEXT:    kshiftrw $12, %k0, %k2
; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k2} {z}
; CHECK-NEXT:    vmovdqu64 %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr %p, i32 8, <4 x i1> %cond1, <4 x i64> poison)
  call void @llvm.masked.store.v4i64.p0(<4 x i64> %0, ptr %p, i32 8, <4 x i1> %cond2)
  ret void
}

define void @single_cmp(i32 %a, i32 %b, ptr %c, ptr %d) {
; CHECK-LABEL: single_cmp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    cmpl %esi, %edi
; CHECK-NEXT:    cfcmovnew (%rdx), %ax
; CHECK-NEXT:    cfcmovnew %ax, (%rcx)
; CHECK-NEXT:    retq
entry:
  %0 = icmp ne i32 %a, %b
  %1 = insertelement <1 x i1> poison, i1 %0, i64 0
  %2 = tail call <1 x i16> @llvm.masked.load.v1i16.p0(ptr %c, i32 2, <1 x i1> %1, <1 x i16> poison)
  tail call void @llvm.masked.store.v1i16.p0(<1 x i16> %2, ptr %d, i32 2, <1 x i1> %1)
  ret void
}

define void @load_add_store(i32 %a, i32 %b, ptr %p) {
; CHECK-LABEL: load_add_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    cmpl %esi, %edi
; CHECK-NEXT:    cfcmovnew (%rdx), %ax
; CHECK-NEXT:    {nf} incl %eax
; CHECK-NEXT:    cfcmovnew %ax, (%rdx)
; CHECK-NEXT:    retq
entry:
  %0 = icmp ne i32 %a, %b
  %1 = insertelement <1 x i1> poison, i1 %0, i64 0
  %2 = tail call <1 x i16> @llvm.masked.load.v1i16.p0(ptr %p, i32 2, <1 x i1> %1, <1 x i16> poison)
  %3 = extractelement <1 x i16> %2, i64 0
  %4 = add i16 %3, 1
  %5 = insertelement <1 x i16> poison, i16 %4, i64 0
  tail call void @llvm.masked.store.v1i16.p0(<1 x i16> %5, ptr %p, i32 2, <1 x i1> %1)
  ret void
}

define void @load_zext(i1 %cond, ptr %b, ptr %p) {
; CHECK-LABEL: load_zext:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    testb $1, %dil
; CHECK-NEXT:    cfcmovnew (%rsi), %ax
; CHECK-NEXT:    movzwl %ax, %eax
; CHECK-NEXT:    cfcmovnel %eax, (%rdx)
; CHECK-NEXT:    retq
entry:
  %0 = bitcast i1 %cond to <1 x i1>
  %1 = call <1 x i16> @llvm.masked.load.v1i16.p0(ptr %b, i32 2, <1 x i1> %0, <1 x i16> poison)
  %2 = bitcast <1 x i16> %1 to i16
  %zext = zext i16 %2 to i32
  %3 = bitcast i32 %zext to <1 x i32>
  call void @llvm.masked.store.v1i32.p0(<1 x i32> %3, ptr %p, i32 4, <1 x i1> %0)
  ret void
}

define void @load_sext(i1 %cond, ptr %b, ptr %p) {
; CHECK-LABEL: load_sext:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    testb $1, %dil
; CHECK-NEXT:    cfcmovnel (%rsi), %eax
; CHECK-NEXT:    cltq
; CHECK-NEXT:    cfcmovneq %rax, (%rdx)
; CHECK-NEXT:    retq
entry:
  %0 = bitcast i1 %cond to <1 x i1>
  %1 = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr %b, i32 2, <1 x i1> %0, <1 x i32> poison)
  %2 = bitcast <1 x i32> %1 to i32
  %sext = sext i32 %2 to i64
  %3 = bitcast i64 %sext to <1 x i64>
  call void @llvm.masked.store.v1i64.p0(<1 x i64> %3, ptr %p, i32 4, <1 x i1> %0)
  ret void
}
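;; Note for load_zext/load_sext above: only the memory accesses are
;; predicated. The widening itself (movzwl/cltq) runs unconditionally on the
;; register, which is safe because the loaded value escapes only through the
;; store guarded by the same mask.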
define void @sink_gep(ptr %p, i1 %cond) {
; CHECK-LABEL: sink_gep:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    testb $1, %sil
; CHECK-NEXT:    cfcmovnel %eax, 112(%rdi)
; CHECK-NEXT:    cfcmovnel 112(%rdi), %eax
; CHECK-NEXT:    movl %eax, (%rdi)
; CHECK-NEXT:    retq
entry:
  %0 = getelementptr i8, ptr %p, i64 112
  br label %next

next:
  %1 = bitcast i1 %cond to <1 x i1>
  call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %0, i32 1, <1 x i1> %1)
  %2 = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr %0, i32 1, <1 x i1> %1, <1 x i32> zeroinitializer)
  store <1 x i32> %2, ptr %p, align 4
  ret void
}

define void @xor_cond(ptr %p, i1 %cond) {
; CHECK-LABEL: xor_cond:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    testb $1, %sil
; CHECK-NEXT:    cfcmovel %eax, (%rdi)
; CHECK-NEXT:    retq
entry:
  %0 = xor i1 %cond, true
  %1 = insertelement <1 x i1> zeroinitializer, i1 %0, i64 0
  call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %p, i32 1, <1 x i1> %1)
  ret void
}

define i64 @redundant_test(i64 %num, ptr %p1, i64 %in) {
; CHECK-LABEL: redundant_test:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testl $-32, %edi
; CHECK-NEXT:    cfcmoveq (%rsi), %rax
; CHECK-NEXT:    {nf} addq %rdx, %rax
; CHECK-NEXT:    cmovneq %rdi, %rax
; CHECK-NEXT:    retq
  %and = and i64 %num, 4294967264
  %cmp = icmp eq i64 %and, 0
  %mask = bitcast i1 %cmp to <1 x i1>
  %condload = tail call <1 x i64> @llvm.masked.load.v1i64.p0(ptr %p1, i32 8, <1 x i1> %mask, <1 x i64> poison)
  %v = bitcast <1 x i64> %condload to i64
  %add = add i64 %v, %in
  %sel = select i1 %cmp, i64 %add, i64 %num
  ret i64 %sel
}
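;; Note the single TEST in redundant_test: the EFLAGS produced by
;; `testl $-32, %edi` feed both the conditional-faulting load (cfcmoveq) and
;; the final select (cmovneq), with the {nf} (no-flags) add in between leaving
;; EFLAGS intact, so no second compare of %and against zero is emitted.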