; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_mem_shuffle --version 4
; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefix=X86
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefix=X64

; Test for case where insertps folds the load of an insertion element from a constant pool.

define <4 x float> @fold_from_constantpool(<4 x float> %a) {
; X86-LABEL: fold_from_constantpool:
; X86:       # %bb.0:
; X86-NEXT:    insertps $0, {{\.?LCPI[0-9]+_[0-9]+}}+4, %xmm0 # xmm0 = mem[0],xmm0[1,2,3]
; X86-NEXT:    retl
;
; X64-LABEL: fold_from_constantpool:
; X64:       # %bb.0:
; X64-NEXT:    insertps $0, {{\.?LCPI[0-9]+_[0-9]+}}+4(%rip), %xmm0 # xmm0 = mem[0],xmm0[1,2,3]
; X64-NEXT:    retq
  ; imm 64 (0b01_00_0000): count_s = 1 selects element 1 of the constant vector,
  ; count_d = 0 inserts it at lane 0 of %a. The fold rewrites this to a direct
  ; 4-byte load from the constant pool at offset +4 (element 1), matching the
  ; {{\.?LCPI...}}+4 operand in the CHECK lines above.
  ; NOTE(review): the constant-vector operand was missing (garbled) in the source
  ; text; restored as <0.0, 1.0, 0.0, 0.0> per the upstream test — confirm against
  ; llvm/test/CodeGen/X86 history if exact values matter.
  %1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> <float 0.0, float 1.0, float 0.0, float 0.0>, i8 64)
  ret <4 x float> %1
}

declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone