aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSimon Pilgrim <llvm-dev@redking.me.uk>2024-01-08 13:00:08 +0000
committerSimon Pilgrim <llvm-dev@redking.me.uk>2024-01-08 17:25:44 +0000
commitfbfc9cb7ea756ea645cc55eea478b819573fc7a5 (patch)
tree5a2bc2a74fb40efde5b9970afcac4cca3c8f69e4
parent9632f987161b4efeb8c087f19a3eb4f7c69cc920 (diff)
downloadllvm-fbfc9cb7ea756ea645cc55eea478b819573fc7a5.zip
llvm-fbfc9cb7ea756ea645cc55eea478b819573fc7a5.tar.gz
llvm-fbfc9cb7ea756ea645cc55eea478b819573fc7a5.tar.bz2
[X86] vector-shuffle-mmx.ll - replace X32 checks with X86. NFC.
We try to use X32 for gnux32 triples only. Add nounwind to remove cfi noise as well.
-rw-r--r-- llvm/test/CodeGen/X86/vector-shuffle-mmx.ll | 84
1 file changed, 41 insertions(+), 43 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
index 422f522..709be65 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -1,17 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X32 %s
+; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X86 %s
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X64 %s
; If there is no explicit MMX type usage, always promote to XMM.
-define void @test0(ptr %x) {
-; X32-LABEL: test0:
-; X32: ## %bb.0: ## %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-NEXT: movlps %xmm0, (%eax)
-; X32-NEXT: retl
+define void @test0(ptr %x) nounwind {
+; X86-LABEL: test0:
+; X86: ## %bb.0: ## %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-NEXT: movlps %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: test0:
; X64: ## %bb.0: ## %entry
@@ -28,18 +28,16 @@ entry:
ret void
}
-define void @test1() {
-; X32-LABEL: test1:
-; X32: ## %bb.0: ## %entry
-; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: .cfi_offset %edi, -8
-; X32-NEXT: pxor %mm0, %mm0
-; X32-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm1 ## mm1 = 0x7070606040400000
-; X32-NEXT: xorl %edi, %edi
-; X32-NEXT: maskmovq %mm1, %mm0
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+define void @test1() nounwind {
+; X86-LABEL: test1:
+; X86: ## %bb.0: ## %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pxor %mm0, %mm0
+; X86-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm1 ## mm1 = 0x7070606040400000
+; X86-NEXT: xorl %edi, %edi
+; X86-NEXT: maskmovq %mm1, %mm0
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: ## %bb.0: ## %entry
@@ -63,13 +61,13 @@ entry:
@tmp_V2i = common global <2 x i32> zeroinitializer
define void @test2() nounwind {
-; X32-LABEL: test2:
-; X32: ## %bb.0: ## %entry
-; X32-NEXT: movl L_tmp_V2i$non_lazy_ptr, %eax
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-NEXT: movlps %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test2:
+; X86: ## %bb.0: ## %entry
+; X86-NEXT: movl L_tmp_V2i$non_lazy_ptr, %eax
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-NEXT: movlps %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: ## %bb.0: ## %entry
@@ -86,21 +84,21 @@ entry:
}
define <4 x float> @pr35869() nounwind {
-; X32-LABEL: pr35869:
-; X32: ## %bb.0:
-; X32-NEXT: movl $64, %eax
-; X32-NEXT: movd %eax, %mm0
-; X32-NEXT: pxor %mm1, %mm1
-; X32-NEXT: punpcklbw %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3]
-; X32-NEXT: pcmpgtw %mm0, %mm1
-; X32-NEXT: movq %mm0, %mm2
-; X32-NEXT: punpckhwd %mm1, %mm2 ## mm2 = mm2[2],mm1[2],mm2[3],mm1[3]
-; X32-NEXT: xorps %xmm0, %xmm0
-; X32-NEXT: cvtpi2ps %mm2, %xmm0
-; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT: punpcklwd %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
-; X32-NEXT: cvtpi2ps %mm0, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: pr35869:
+; X86: ## %bb.0:
+; X86-NEXT: movl $64, %eax
+; X86-NEXT: movd %eax, %mm0
+; X86-NEXT: pxor %mm1, %mm1
+; X86-NEXT: punpcklbw %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3]
+; X86-NEXT: pcmpgtw %mm0, %mm1
+; X86-NEXT: movq %mm0, %mm2
+; X86-NEXT: punpckhwd %mm1, %mm2 ## mm2 = mm2[2],mm1[2],mm2[3],mm1[3]
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: cvtpi2ps %mm2, %xmm0
+; X86-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X86-NEXT: punpcklwd %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
+; X86-NEXT: cvtpi2ps %mm0, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: pr35869:
; X64: ## %bb.0: