author     Bill Wendling <isanbard@gmail.com>   2011-03-25 06:22:54 +0000
committer  Bill Wendling <isanbard@gmail.com>   2011-03-25 06:22:54 +0000
commit     8c6b10ade4e4766ac951e6a757674048537a74da (patch)
tree       cb779f3c7c91d9bcf21909d2bd9f3dd2fa7c442a
parent     cedc390db046c7832e1ff7e576df07bce787fe72 (diff)
--- Merging r127731 into '.':
U    test/CodeGen/X86/byval2.ll
U    test/CodeGen/X86/byval4.ll
U    test/CodeGen/X86/byval.ll
U    test/CodeGen/X86/byval3.ll
U    test/CodeGen/X86/byval5.ll
--- Merging r127732 into '.':
U    test/CodeGen/X86/stdarg.ll
U    test/CodeGen/X86/fold-mul-lohi.ll
U    test/CodeGen/X86/scalar-min-max-fill-operand.ll
U    test/CodeGen/X86/tailcallbyval64.ll
U    test/CodeGen/X86/stride-reuse.ll
U    test/CodeGen/X86/sse-align-3.ll
U    test/CodeGen/X86/sse-commute.ll
U    test/CodeGen/X86/stride-nine-with-base-reg.ll
U    test/CodeGen/X86/coalescer-commute2.ll
U    test/CodeGen/X86/sse-align-7.ll
U    test/CodeGen/X86/sse_reload_fold.ll
U    test/CodeGen/X86/sse-align-0.ll
--- Merging r127733 into '.':
U    test/CodeGen/X86/peep-vector-extract-concat.ll
U    test/CodeGen/X86/pmulld.ll
U    test/CodeGen/X86/widen_load-0.ll
U    test/CodeGen/X86/v2f32.ll
U    test/CodeGen/X86/apm.ll
U    test/CodeGen/X86/h-register-store.ll
U    test/CodeGen/X86/h-registers-0.ll
--- Merging r127734 into '.':
U    test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll
U    test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
U    test/CodeGen/X86/avoid-lea-scale2.ll
U    test/CodeGen/X86/lea-3.ll
U    test/CodeGen/X86/vec_set-8.ll
U    test/CodeGen/X86/i64-mem-copy.ll
U    test/CodeGen/X86/x86-64-malloc.ll
U    test/CodeGen/X86/mmx-copy-gprs.ll
U    test/CodeGen/X86/vec_shuffle-17.ll
U    test/CodeGen/X86/2007-07-18-Vector-Extract.ll
--- Merging r127775 into '.':
U    test/CodeGen/X86/constant-pool-remat-0.ll
--- Merging r127872 into '.':
U    utils/lit/lit/TestingConfig.py
U    lib/Support/raw_ostream.cpp

llvm-svn: 128258
-rw-r--r--  llvm/lib/Support/raw_ostream.cpp                       30
-rw-r--r--  llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll      7
-rw-r--r--  llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll      6
-rw-r--r--  llvm/test/CodeGen/X86/apm.ll                           11
-rw-r--r--  llvm/test/CodeGen/X86/avoid-lea-scale2.ll               4
-rw-r--r--  llvm/test/CodeGen/X86/byval.ll                          3
-rw-r--r--  llvm/test/CodeGen/X86/byval2.ll                        27
-rw-r--r--  llvm/test/CodeGen/X86/byval3.ll                        27
-rw-r--r--  llvm/test/CodeGen/X86/byval4.ll                        27
-rw-r--r--  llvm/test/CodeGen/X86/byval5.ll                        27
-rw-r--r--  llvm/test/CodeGen/X86/coalescer-commute2.ll             9
-rw-r--r--  llvm/test/CodeGen/X86/constant-pool-remat-0.ll         16
-rw-r--r--  llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll   10
-rw-r--r--  llvm/test/CodeGen/X86/fold-mul-lohi.ll                  5
-rw-r--r--  llvm/test/CodeGen/X86/h-register-store.ll              32
-rw-r--r--  llvm/test/CodeGen/X86/h-registers-0.ll                 26
-rw-r--r--  llvm/test/CodeGen/X86/i64-mem-copy.ll                   8
-rw-r--r--  llvm/test/CodeGen/X86/lea-3.ll                         15
-rw-r--r--  llvm/test/CodeGen/X86/mmx-copy-gprs.ll                 10
-rw-r--r--  llvm/test/CodeGen/X86/peep-vector-extract-concat.ll     7
-rw-r--r--  llvm/test/CodeGen/X86/pmulld.ll                        12
-rw-r--r--  llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll   13
-rw-r--r--  llvm/test/CodeGen/X86/sse-align-0.ll                    3
-rw-r--r--  llvm/test/CodeGen/X86/sse-align-3.ll                    7
-rw-r--r--  llvm/test/CodeGen/X86/sse-align-7.ll                    4
-rw-r--r--  llvm/test/CodeGen/X86/sse-commute.ll                    2
-rw-r--r--  llvm/test/CodeGen/X86/sse_reload_fold.ll                5
-rw-r--r--  llvm/test/CodeGen/X86/stdarg.ll                         3
-rw-r--r--  llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll      5
-rw-r--r--  llvm/test/CodeGen/X86/stride-reuse.ll                   5
-rw-r--r--  llvm/test/CodeGen/X86/tailcallbyval64.ll               25
-rw-r--r--  llvm/test/CodeGen/X86/v2f32.ll                         67
-rw-r--r--  llvm/test/CodeGen/X86/vec_set-8.ll                      7
-rw-r--r--  llvm/test/CodeGen/X86/vec_shuffle-17.ll                 7
-rw-r--r--  llvm/test/CodeGen/X86/widen_load-0.ll                   8
-rw-r--r--  llvm/test/CodeGen/X86/x86-64-malloc.ll                  4
-rw-r--r--  llvm/utils/lit/lit/TestingConfig.py                     1
37 files changed, 401 insertions, 84 deletions
diff --git a/llvm/lib/Support/raw_ostream.cpp b/llvm/lib/Support/raw_ostream.cpp
index b175d20..d9fb406 100644
--- a/llvm/lib/Support/raw_ostream.cpp
+++ b/llvm/lib/Support/raw_ostream.cpp
@@ -220,6 +220,36 @@ raw_ostream &raw_ostream::operator<<(const void *P) {
}
raw_ostream &raw_ostream::operator<<(double N) {
+#ifdef _WIN32
+ // On MSVCRT and compatible runtimes, %e output is not POSIX-compatible by
+ // default: the exponent should have at least two digits ("%+03d").
+ // FIXME: Implement our own formatter here or in Support/Format.h!
+ int fpcl = _fpclass(N);
+
+ // negative zero
+ if (fpcl == _FPCLASS_NZ)
+ return *this << "-0.000000e+00";
+
+ char buf[16];
+ unsigned len;
+ len = snprintf(buf, sizeof(buf), "%e", N);
+ if (len <= sizeof(buf) - 2) {
+ if (len >= 5 && buf[len - 5] == 'e' && buf[len - 3] == '0') {
+ int cs = buf[len - 4];
+ if (cs == '+' || cs == '-') {
+ int c1 = buf[len - 2];
+ int c0 = buf[len - 1];
+ if (isdigit(c1) && isdigit(c0)) {
+ // Trim leading '0': "...e+012" -> "...e+12\0"
+ buf[len - 3] = c1;
+ buf[len - 2] = c0;
+ buf[--len] = 0;
+ }
+ }
+ }
+ return this->operator<<(buf);
+ }
+#endif
return this->operator<<(format("%e", N));
}
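
Note on the raw_ostream change above: MSVCRT formats %e exponents with three digits (e.g. "1.000000e+012"), while POSIX expects two, so the hunk trims one leading '0' when it is safe to do so. The standalone C++ sketch below shows the same trimming applied to a pre-formatted buffer; it is illustrative only, not the committed code, and the helper name normalizeExponent and the driver in main are invented for this example.

#include <cctype>
#include <cstddef>
#include <cstdio>

// Trim a leading '0' from a three-digit exponent so the string matches the
// two-digit POSIX form, e.g. "1.500000e+012" -> "1.500000e+12".
static void normalizeExponent(char *buf, std::size_t len) {
  if (len >= 5 && buf[len - 5] == 'e' && buf[len - 3] == '0' &&
      (buf[len - 4] == '+' || buf[len - 4] == '-') &&
      std::isdigit((unsigned char)buf[len - 2]) &&
      std::isdigit((unsigned char)buf[len - 1])) {
    buf[len - 3] = buf[len - 2]; // shift the last two exponent digits left
    buf[len - 2] = buf[len - 1];
    buf[len - 1] = '\0';         // drop the now-duplicated trailing digit
  }
}

int main() {
  // Stand-in for MSVCRT's three-digit-exponent %e output.
  char buf[32];
  int len = std::snprintf(buf, sizeof(buf), "%s", "1.000000e+012");
  normalizeExponent(buf, (std::size_t)len);
  std::printf("%s\n", buf); // prints "1.000000e+12"
  return 0;
}
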
diff --git a/llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll b/llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll
index de226a1..3458550 100644
--- a/llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll
+++ b/llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll
@@ -1,5 +1,8 @@
-; RUN: llc %s -o - -march=x86-64 | grep {(%rdi,%rax,8)}
-; RUN: llc %s -o - -march=x86-64 | not grep {addq.*8}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; CHECK-NOT: {{addq.*8}}
+; CHECK: ({{%rdi|%rcx}},%rax,8)
+; CHECK-NOT: {{addq.*8}}
define void @foo(double* %y) nounwind {
entry:
diff --git a/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll b/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
index 8625b27..6288c4a 100644
--- a/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
+++ b/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse | grep {movq (%rdi), %rax}
-; RUN: llc < %s -march=x86-64 -mattr=+sse | grep {movq 8(%rdi), %rax}
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse | FileCheck %s
+; CHECK: movq ([[A0:%rdi|%rcx]]), %rax
+; CHECK: movq 8([[A0]]), %rax
define i64 @foo_0(<2 x i64>* %val) {
entry:
%val12 = getelementptr <2 x i64>* %val, i32 0, i32 0 ; <i64*> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/apm.ll b/llvm/test/CodeGen/X86/apm.ll
index d0c64f2..b514cf6 100644
--- a/llvm/test/CodeGen/X86/apm.ll
+++ b/llvm/test/CodeGen/X86/apm.ll
@@ -1,10 +1,16 @@
-; RUN: llc < %s -o - -march=x86-64 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; PR8573
; CHECK: foo:
; CHECK: leaq (%rdi), %rax
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: monitor
+; WIN64: foo:
+; WIN64: leaq (%rcx), %rax
+; WIN64-NEXT: movl %edx, %ecx
+; WIN64-NEXT: movl %r8d, %edx
+; WIN64-NEXT: monitor
define void @foo(i8* %P, i32 %E, i32 %H) nounwind {
entry:
tail call void @llvm.x86.sse3.monitor(i8* %P, i32 %E, i32 %H)
@@ -17,6 +23,9 @@ declare void @llvm.x86.sse3.monitor(i8*, i32, i32) nounwind
; CHECK: movl %edi, %ecx
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: mwait
+; WIN64: bar:
+; WIN64: movl %edx, %eax
+; WIN64-NEXT: mwait
define void @bar(i32 %E, i32 %H) nounwind {
entry:
tail call void @llvm.x86.sse3.mwait(i32 %E, i32 %H)
diff --git a/llvm/test/CodeGen/X86/avoid-lea-scale2.ll b/llvm/test/CodeGen/X86/avoid-lea-scale2.ll
index 8003de2..cee2ee4 100644
--- a/llvm/test/CodeGen/X86/avoid-lea-scale2.ll
+++ b/llvm/test/CodeGen/X86/avoid-lea-scale2.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86-64 | grep {leal.*-2(\[%\]rdi,\[%\]rdi)}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; CHECK: leal -2({{%rdi,%rdi|%rcx,%rcx}})
define i32 @foo(i32 %x) nounwind readnone {
%t0 = shl i32 %x, 1
diff --git a/llvm/test/CodeGen/X86/byval.ll b/llvm/test/CodeGen/X86/byval.ll
index ac0bc09..185eda1 100644
--- a/llvm/test/CodeGen/X86/byval.ll
+++ b/llvm/test/CodeGen/X86/byval.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | FileCheck -check-prefix=X86-64 %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck -check-prefix=X86-64 %s
+; Win64 does not support byval yet.
; RUN: llc < %s -march=x86 | FileCheck -check-prefix=X86 %s
; X86: movl 4(%esp), %eax
diff --git a/llvm/test/CodeGen/X86/byval2.ll b/llvm/test/CodeGen/X86/byval2.ll
index 71129f5..03a9f0f 100644
--- a/llvm/test/CodeGen/X86/byval2.ll
+++ b/llvm/test/CodeGen/X86/byval2.ll
@@ -1,5 +1,28 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: rep
+; X64-NOT: movsq
+
+; Win64 does not support byval yet.
+
+; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: rep
+; X32-NOT: movsl
%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
i64, i64, i64, i64, i64, i64, i64, i64,
diff --git a/llvm/test/CodeGen/X86/byval3.ll b/llvm/test/CodeGen/X86/byval3.ll
index 504e0be..8d5bb6d 100644
--- a/llvm/test/CodeGen/X86/byval3.ll
+++ b/llvm/test/CodeGen/X86/byval3.ll
@@ -1,5 +1,28 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: rep
+; X64-NOT: movsq
+
+; Win64 does not support byval yet.
+
+; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: rep
+; X32-NOT: movsl
%struct.s = type { i32, i32, i32, i32, i32, i32, i32, i32,
i32, i32, i32, i32, i32, i32, i32, i32,
diff --git a/llvm/test/CodeGen/X86/byval4.ll b/llvm/test/CodeGen/X86/byval4.ll
index 4db9d65..ae1a79a 100644
--- a/llvm/test/CodeGen/X86/byval4.ll
+++ b/llvm/test/CodeGen/X86/byval4.ll
@@ -1,5 +1,28 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: rep
+; X64-NOT: movsq
+
+; Win64 does not support byval yet.
+
+; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: rep
+; X32-NOT: movsl
%struct.s = type { i16, i16, i16, i16, i16, i16, i16, i16,
i16, i16, i16, i16, i16, i16, i16, i16,
diff --git a/llvm/test/CodeGen/X86/byval5.ll b/llvm/test/CodeGen/X86/byval5.ll
index 69c115b..a376709 100644
--- a/llvm/test/CodeGen/X86/byval5.ll
+++ b/llvm/test/CodeGen/X86/byval5.ll
@@ -1,5 +1,28 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: movsq
+; X64: rep
+; X64-NOT: rep
+; X64: movsq
+; X64-NOT: rep
+; X64-NOT: movsq
+
+; Win64 does not support byval yet.
+
+; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: movsl
+; X32: rep
+; X32-NOT: rep
+; X32: movsl
+; X32-NOT: rep
+; X32-NOT: movsl
%struct.s = type { i8, i8, i8, i8, i8, i8, i8, i8,
i8, i8, i8, i8, i8, i8, i8, i8,
diff --git a/llvm/test/CodeGen/X86/coalescer-commute2.ll b/llvm/test/CodeGen/X86/coalescer-commute2.ll
index 5d10bba..7306920 100644
--- a/llvm/test/CodeGen/X86/coalescer-commute2.ll
+++ b/llvm/test/CodeGen/X86/coalescer-commute2.ll
@@ -1,5 +1,10 @@
-; RUN: llc < %s -march=x86-64 | grep paddw | count 2
-; RUN: llc < %s -march=x86-64 | not grep mov
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: mov
+; CHECK: paddw
+; CHECK-NOT: mov
+; CHECK: paddw
+; CHECK-NOT: paddw
+; CHECK-NOT: mov
; The 2-addr pass should ensure that identical code is produced for these functions
; no extra copy should be generated.
diff --git a/llvm/test/CodeGen/X86/constant-pool-remat-0.ll b/llvm/test/CodeGen/X86/constant-pool-remat-0.ll
index 2a44463..f1b061f 100644
--- a/llvm/test/CodeGen/X86/constant-pool-remat-0.ll
+++ b/llvm/test/CodeGen/X86/constant-pool-remat-0.ll
@@ -1,7 +1,15 @@
-; RUN: llc < %s -march=x86-64 | grep LCPI | count 3
-; RUN: llc < %s -march=x86-64 -o /dev/null -stats -info-output-file - | grep asm-printer | grep 6
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep LCPI | count 3
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o /dev/null -stats -info-output-file - | grep asm-printer | grep 12
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+; CHECK: LCPI
+; CHECK: LCPI
+; CHECK: LCPI
+; CHECK-NOT: LCPI
+
+; RUN: llc < %s -mtriple=x86_64-linux -o /dev/null -stats -info-output-file - | FileCheck %s -check-prefix=X64stat
+; X64stat: 6 asm-printer
+
+; RUN: llc < %s -march=x86 -mattr=+sse2 -o /dev/null -stats -info-output-file - | FileCheck %s -check-prefix=X32stat
+; X32stat: 12 asm-printer
declare float @qux(float %y)
diff --git a/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll b/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
index 17cb2b3..b82348b 100644
--- a/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
+++ b/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
@@ -1,6 +1,10 @@
-; RUN: llc < %s -march=x86-64 -o %t -stats -info-output-file - | \
-; RUN: grep {asm-printer} | grep {Number of machine instrs printed} | grep 9
-; RUN: grep {leal 1(\%rsi),} %t
+; RUN: llc < %s -mtriple=x86_64-linux -o /dev/null -stats |& FileCheck %s -check-prefix=STATS
+; RUN: llc < %s -mtriple=x86_64-win32 -o /dev/null -stats |& FileCheck %s -check-prefix=STATS
+; STATS: 9 asm-printer
+
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; CHECK: leal 1({{%rsi|%rdx}}),
define fastcc zeroext i8 @fullGtU(i32 %i1, i32 %i2, i8* %ptr) nounwind optsize {
entry:
diff --git a/llvm/test/CodeGen/X86/fold-mul-lohi.ll b/llvm/test/CodeGen/X86/fold-mul-lohi.ll
index 0351eca..5614c80 100644
--- a/llvm/test/CodeGen/X86/fold-mul-lohi.ll
+++ b/llvm/test/CodeGen/X86/fold-mul-lohi.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: lea
@B = external global [1000 x i8], align 32
@A = external global [1000 x i8], align 32
diff --git a/llvm/test/CodeGen/X86/h-register-store.ll b/llvm/test/CodeGen/X86/h-register-store.ll
index d30e6b3..0adb2b1 100644
--- a/llvm/test/CodeGen/X86/h-register-store.ll
+++ b/llvm/test/CodeGen/X86/h-register-store.ll
@@ -1,9 +1,29 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep mov %t | count 6
-; RUN: grep {movb %ah, (%rsi)} %t | count 3
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep mov %t | count 3
-; RUN: grep {movb %ah, (%e} %t | count 3
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; X64: mov
+; X64-NEXT: movb %ah, (%rsi)
+; X64: mov
+; X64-NEXT: movb %ah, (%rsi)
+; X64: mov
+; X64-NEXT: movb %ah, (%rsi)
+; X64-NOT: mov
+
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=W64
+; W64-NOT: mov
+; W64: movb %ch, (%rdx)
+; W64-NOT: mov
+; W64: movb %ch, (%rdx)
+; W64-NOT: mov
+; W64: movb %ch, (%rdx)
+; W64-NOT: mov
+
+; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
+; X32-NOT: mov
+; X32: movb %ah, (%e
+; X32-NOT: mov
+; X32: movb %ah, (%e
+; X32-NOT: mov
+; X32: movb %ah, (%e
+; X32-NOT: mov
; Use h-register extract and store.
diff --git a/llvm/test/CodeGen/X86/h-registers-0.ll b/llvm/test/CodeGen/X86/h-registers-0.ll
index e84bb9a..cdc75af 100644
--- a/llvm/test/CodeGen/X86/h-registers-0.ll
+++ b/llvm/test/CodeGen/X86/h-registers-0.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X86-64
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X86-32
; Use h registers. On x86-64, codegen doesn't support general allocation
@@ -9,6 +10,12 @@ define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
; X86-64: shrq $8, %rdi
; X86-64: incb %dil
+; See the FIXME on regclass GR8.
+; It could be optimally transformed to: incb %ch; movb %ch, (%rdx)
+; WIN64: bar64:
+; WIN64: shrq $8, %rcx
+; WIN64: incb %cl
+
; X86-32: bar64:
; X86-32: incb %ah
%t0 = lshr i64 %x, 8
@@ -23,6 +30,10 @@ define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
; X86-64: shrl $8, %edi
; X86-64: incb %dil
+; WIN64: bar32:
+; WIN64: shrl $8, %ecx
+; WIN64: incb %cl
+
; X86-32: bar32:
; X86-32: incb %ah
%t0 = lshr i32 %x, 8
@@ -37,6 +48,10 @@ define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
; X86-64: shrl $8, %edi
; X86-64: incb %dil
+; WIN64: bar16:
+; WIN64: shrl $8, %ecx
+; WIN64: incb %cl
+
; X86-32: bar16:
; X86-32: incb %ah
%t0 = lshr i16 %x, 8
@@ -51,6 +66,9 @@ define i64 @qux64(i64 inreg %x) nounwind {
; X86-64: movq %rdi, %rax
; X86-64: movzbl %ah, %eax
+; WIN64: qux64:
+; WIN64: movzbl %ch, %eax
+
; X86-32: qux64:
; X86-32: movzbl %ah, %eax
%t0 = lshr i64 %x, 8
@@ -63,6 +81,9 @@ define i32 @qux32(i32 inreg %x) nounwind {
; X86-64: movl %edi, %eax
; X86-64: movzbl %ah, %eax
+; WIN64: qux32:
+; WIN64: movzbl %ch, %eax
+
; X86-32: qux32:
; X86-32: movzbl %ah, %eax
%t0 = lshr i32 %x, 8
@@ -75,6 +96,9 @@ define i16 @qux16(i16 inreg %x) nounwind {
; X86-64: movl %edi, %eax
; X86-64: movzbl %ah, %eax
+; WIN64: qux16:
+; WIN64: movzbl %ch, %eax
+
; X86-32: qux16:
; X86-32: movzbl %ah, %eax
%t0 = lshr i16 %x, 8
diff --git a/llvm/test/CodeGen/X86/i64-mem-copy.ll b/llvm/test/CodeGen/X86/i64-mem-copy.ll
index 847e209..dce12ae 100644
--- a/llvm/test/CodeGen/X86/i64-mem-copy.ll
+++ b/llvm/test/CodeGen/X86/i64-mem-copy.ll
@@ -1,5 +1,9 @@
-; RUN: llc < %s -march=x86-64 | grep {movq.*(%rsi), %rax}
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {movsd.*(%eax),}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=X64
+; X64: movq ({{%rsi|%rdx}}), %r
+
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s -check-prefix=X32
+; X32: movsd (%eax), %xmm
; Uses movsd to load / store i64 values if sse2 is available.
diff --git a/llvm/test/CodeGen/X86/lea-3.ll b/llvm/test/CodeGen/X86/lea-3.ll
index 44413d6..040c5c2 100644
--- a/llvm/test/CodeGen/X86/lea-3.ll
+++ b/llvm/test/CodeGen/X86/lea-3.ll
@@ -1,16 +1,19 @@
-; RUN: llc < %s -march=x86-64 | grep {leal (%rdi,%rdi,2), %eax}
-define i32 @test(i32 %a) {
- %tmp2 = mul i32 %a, 3 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
-; RUN: llc < %s -march=x86-64 | grep {leaq (,%rdi,4), %rax}
+; CHECK: leaq (,[[A0:%rdi|%rcx]],4), %rax
define i64 @test2(i64 %a) {
%tmp2 = shl i64 %a, 2
%tmp3 = or i64 %tmp2, %a
ret i64 %tmp3
}
+; CHECK: leal ([[A0]],[[A0]],2), %eax
+define i32 @test(i32 %a) {
+ %tmp2 = mul i32 %a, 3 ; <i32> [#uses=1]
+ ret i32 %tmp2
+}
+
;; TODO! LEA instead of shift + copy.
define i64 @test3(i64 %a) {
%tmp2 = shl i64 %a, 3
diff --git a/llvm/test/CodeGen/X86/mmx-copy-gprs.ll b/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
index 3607043..6cb21ca 100644
--- a/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
+++ b/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
@@ -1,6 +1,10 @@
-; RUN: llc < %s -march=x86-64 | grep {movq.*(%rsi), %rax}
-; RUN: llc < %s -march=x86 -mattr=-sse2 | grep {movl.*4(%eax),}
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {movsd.(%eax),}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=X64
+; X64: movq ({{%rsi|%rdx}}), %rax
+; RUN: llc < %s -march=x86 -mattr=-sse2 | FileCheck %s -check-prefix=X32
+; X32: movl 4(%eax),
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s -check-prefix=XMM
+; XMM: movsd (%eax),
; This test should use GPRs to copy the mmx value, not MMX regs. Using mmx regs
; increases the places that need to use emms.
diff --git a/llvm/test/CodeGen/X86/peep-vector-extract-concat.ll b/llvm/test/CodeGen/X86/peep-vector-extract-concat.ll
index e4ab2b5..606a9be 100644
--- a/llvm/test/CodeGen/X86/peep-vector-extract-concat.ll
+++ b/llvm/test/CodeGen/X86/peep-vector-extract-concat.ll
@@ -1,4 +1,9 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2,-sse41 | grep {pshufd \$3, %xmm0, %xmm0}
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2,-sse41 | FileCheck %s
+; CHECK: pshufd $3, %xmm0, %xmm0
+
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2,-sse41 | FileCheck %s -check-prefix=WIN64
+; %a is passed indirectly on Win64.
+; WIN64: movss 12(%rcx), %xmm0
define float @foo(<8 x float> %a) nounwind {
%c = extractelement <8 x float> %a, i32 3
diff --git a/llvm/test/CodeGen/X86/pmulld.ll b/llvm/test/CodeGen/X86/pmulld.ll
index 3ef5941..be527ae 100644
--- a/llvm/test/CodeGen/X86/pmulld.ll
+++ b/llvm/test/CodeGen/X86/pmulld.ll
@@ -1,8 +1,13 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse41 -asm-verbose=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse41 -asm-verbose=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse41 -asm-verbose=0 | FileCheck %s -check-prefix=WIN64
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK: test1:
; CHECK-NEXT: pmulld
+
+; WIN64: test1:
+; WIN64-NEXT: movdqa (%rcx), %xmm0
+; WIN64-NEXT: pmulld (%rdx), %xmm0
%C = mul <4 x i32> %A, %B
ret <4 x i32> %C
}
@@ -10,6 +15,11 @@ define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
define <4 x i32> @test1a(<4 x i32> %A, <4 x i32> *%Bp) nounwind {
; CHECK: test1a:
; CHECK-NEXT: pmulld
+
+; WIN64: test1a:
+; WIN64-NEXT: movdqa (%rcx), %xmm0
+; WIN64-NEXT: pmulld (%rdx), %xmm0
+
%B = load <4 x i32>* %Bp
%C = mul <4 x i32> %A, %B
ret <4 x i32> %C
diff --git a/llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll b/llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll
index fe40758..2f90932 100644
--- a/llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll
+++ b/llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll
@@ -1,6 +1,13 @@
-; RUN: llc < %s -march=x86-64 | grep min | count 1
-; RUN: llc < %s -march=x86-64 | grep max | count 1
-; RUN: llc < %s -march=x86-64 | grep mov | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: mov
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: min
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: mov
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: max
+; CHECK-NOT: {{(min|max|mov)}}
declare float @bar()
diff --git a/llvm/test/CodeGen/X86/sse-align-0.ll b/llvm/test/CodeGen/X86/sse-align-0.ll
index b12a87d..8ffd312 100644
--- a/llvm/test/CodeGen/X86/sse-align-0.ll
+++ b/llvm/test/CodeGen/X86/sse-align-0.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | not grep mov
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: mov
define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
%t = load <4 x float>* %p
diff --git a/llvm/test/CodeGen/X86/sse-align-3.ll b/llvm/test/CodeGen/X86/sse-align-3.ll
index c42f7f0..04f2161 100644
--- a/llvm/test/CodeGen/X86/sse-align-3.ll
+++ b/llvm/test/CodeGen/X86/sse-align-3.ll
@@ -1,4 +1,9 @@
-; RUN: llc < %s -march=x86-64 | grep movap | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: movapd
+; CHECK: movaps
+; CHECK-NOT: movaps
+; CHECK: movapd
+; CHECK-NOT: movap
define void @foo(<4 x float>* %p, <4 x float> %x) nounwind {
store <4 x float> %x, <4 x float>* %p
diff --git a/llvm/test/CodeGen/X86/sse-align-7.ll b/llvm/test/CodeGen/X86/sse-align-7.ll
index 5784481..e55d585 100644
--- a/llvm/test/CodeGen/X86/sse-align-7.ll
+++ b/llvm/test/CodeGen/X86/sse-align-7.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86-64 | grep movaps | count 1
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK: movaps
+; CHECK-NOT: movaps
define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
store <2 x i64> %x, <2 x i64>* %p
diff --git a/llvm/test/CodeGen/X86/sse-commute.ll b/llvm/test/CodeGen/X86/sse-commute.ll
index 38ed644..336bf06 100644
--- a/llvm/test/CodeGen/X86/sse-commute.ll
+++ b/llvm/test/CodeGen/X86/sse-commute.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
; Commute the comparison to avoid a move.
; PR7500.
diff --git a/llvm/test/CodeGen/X86/sse_reload_fold.ll b/llvm/test/CodeGen/X86/sse_reload_fold.ll
index dc3d6fe..02399c4 100644
--- a/llvm/test/CodeGen/X86/sse_reload_fold.ll
+++ b/llvm/test/CodeGen/X86/sse_reload_fold.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86-64 -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& \
-; RUN: grep fail | count 1
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& FileCheck %s
+; CHECK: fail
+; CHECK-NOT: fail
declare float @test_f(float %f)
declare double @test_d(double %f)
diff --git a/llvm/test/CodeGen/X86/stdarg.ll b/llvm/test/CodeGen/X86/stdarg.ll
index 9778fa1..5728daf 100644
--- a/llvm/test/CodeGen/X86/stdarg.ll
+++ b/llvm/test/CodeGen/X86/stdarg.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | grep {testb \[%\]al, \[%\]al}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK: testb %al, %al
%struct.__va_list_tag = type { i32, i32, i8*, i8* }
diff --git a/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll b/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
index f4847a3..ddf059c 100644
--- a/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
+++ b/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86 -relocation-model=static | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: lea
; P should be sunk into the loop and folded into the address mode. There
; shouldn't be any lea instructions inside the loop.
diff --git a/llvm/test/CodeGen/X86/stride-reuse.ll b/llvm/test/CodeGen/X86/stride-reuse.ll
index 5cbd895..1251a24 100644
--- a/llvm/test/CodeGen/X86/stride-reuse.ll
+++ b/llvm/test/CodeGen/X86/stride-reuse.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: lea
@B = external global [1000 x float], align 32
@A = external global [1000 x float], align 32
diff --git a/llvm/test/CodeGen/X86/tailcallbyval64.ll b/llvm/test/CodeGen/X86/tailcallbyval64.ll
index 7c685b8..1b1efe7 100644
--- a/llvm/test/CodeGen/X86/tailcallbyval64.ll
+++ b/llvm/test/CodeGen/X86/tailcallbyval64.ll
@@ -1,15 +1,30 @@
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep TAILCALL
+; RUN: llc < %s -mtriple=x86_64-linux -tailcallopt | FileCheck %s
+
+; FIXME: Win64 does not support byval.
+
+; Expect the entry point.
+; CHECK: tailcaller:
+
; Expect 2 rep;movs because of tail call byval lowering.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep rep | wc -l | grep 2
+; CHECK: rep;
+; CHECK: rep;
+
; A sequence of copyto/copyfrom virtual registers is used to deal with byval
; lowering appearing after moving arguments to registers. The following two
; checks verify that the register allocator changes those sequences to direct
; moves to argument register where it can (for registers that are not used in
; byval lowering - not rsi, not rdi, not rcx).
; Expect argument 4 to be moved directly to register edx.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {7} | grep edx
+; CHECK: movl $7, %edx
+
; Expect argument 6 to be moved directly to register r8.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {17} | grep r8
+; CHECK: movl $17, %r8d
+
+; Expect a jmp to @tailcallee rather than a call.
+; CHECK: jmp tailcallee
+
+; Expect the trailer.
+; CHECK: .size tailcaller
%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
i64, i64, i64, i64, i64, i64, i64, i64,
@@ -25,5 +40,3 @@ entry:
%tmp4 = tail call fastcc i64 @tailcallee(%struct.s* %a byval, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
ret i64 %tmp4
}
-
-
diff --git a/llvm/test/CodeGen/X86/v2f32.ll b/llvm/test/CodeGen/X86/v2f32.ll
index 76c3fdf..6d14099 100644
--- a/llvm/test/CodeGen/X86/v2f32.ll
+++ b/llvm/test/CodeGen/X86/v2f32.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-linux -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=W64
; RUN: llc < %s -mcpu=yonah -march=x86 -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32
; PR7518
@@ -15,6 +16,13 @@ define void @test1(<2 x float> %Q, float *%P2) nounwind {
; X64-NEXT: movss %xmm1, (%rdi)
; X64-NEXT: ret
+; W64: test1:
+; W64-NEXT: movdqa (%rcx), %xmm0
+; W64-NEXT: pshufd $1, %xmm0, %xmm1
+; W64-NEXT: addss %xmm0, %xmm1
+; W64-NEXT: movss %xmm1, (%rdx)
+; W64-NEXT: ret
+
; X32: test1:
; X32-NEXT: pshufd $1, %xmm0, %xmm1
; X32-NEXT: addss %xmm0, %xmm1
@@ -31,6 +39,14 @@ define <2 x float> @test2(<2 x float> %Q, <2 x float> %R, <2 x float> *%P) nounw
; X64: test2:
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: ret
+
+; W64: test2:
+; W64-NEXT: movaps (%rcx), %xmm0
+; W64-NEXT: addps (%rdx), %xmm0
+; W64-NEXT: ret
+
+; X32: test2:
+; X32: addps %xmm1, %xmm0
}
@@ -38,17 +54,35 @@ define <2 x float> @test3(<4 x float> %A) nounwind {
%B = shufflevector <4 x float> %A, <4 x float> undef, <2 x i32> <i32 0, i32 1>
%C = fadd <2 x float> %B, %B
ret <2 x float> %C
-; CHECK: test3:
-; CHECK-NEXT: addps %xmm0, %xmm0
-; CHECK-NEXT: ret
+; X64: test3:
+; X64-NEXT: addps %xmm0, %xmm0
+; X64-NEXT: ret
+
+; W64: test3:
+; W64-NEXT: movaps (%rcx), %xmm0
+; W64-NEXT: addps %xmm0, %xmm0
+; W64-NEXT: ret
+
+; X32: test3:
+; X32-NEXT: addps %xmm0, %xmm0
+; X32-NEXT: ret
}
define <2 x float> @test4(<2 x float> %A) nounwind {
%C = fadd <2 x float> %A, %A
ret <2 x float> %C
-; CHECK: test4:
-; CHECK-NEXT: addps %xmm0, %xmm0
-; CHECK-NEXT: ret
+; X64: test4:
+; X64-NEXT: addps %xmm0, %xmm0
+; X64-NEXT: ret
+
+; W64: test4:
+; W64-NEXT: movaps (%rcx), %xmm0
+; W64-NEXT: addps %xmm0, %xmm0
+; W64-NEXT: ret
+
+; X32: test4:
+; X32-NEXT: addps %xmm0, %xmm0
+; X32-NEXT: ret
}
define <4 x float> @test5(<4 x float> %A) nounwind {
@@ -61,10 +95,21 @@ BB:
%E = shufflevector <2 x float> %D, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x float> %E
-; CHECK: _test5:
-; CHECK-NEXT: addps %xmm0, %xmm0
-; CHECK-NEXT: addps %xmm0, %xmm0
-; CHECK-NEXT: ret
+; X64: test5:
+; X64-NEXT: addps %xmm0, %xmm0
+; X64-NEXT: addps %xmm0, %xmm0
+; X64-NEXT: ret
+
+; W64: test5:
+; W64-NEXT: movaps (%rcx), %xmm0
+; W64-NEXT: addps %xmm0, %xmm0
+; W64-NEXT: addps %xmm0, %xmm0
+; W64-NEXT: ret
+
+; X32: test5:
+; X32-NEXT: addps %xmm0, %xmm0
+; X32-NEXT: addps %xmm0, %xmm0
+; X32-NEXT: ret
}
diff --git a/llvm/test/CodeGen/X86/vec_set-8.ll b/llvm/test/CodeGen/X86/vec_set-8.ll
index 9697f11..66056d0 100644
--- a/llvm/test/CodeGen/X86/vec_set-8.ll
+++ b/llvm/test/CodeGen/X86/vec_set-8.ll
@@ -1,5 +1,8 @@
-; RUN: llc < %s -march=x86-64 | not grep movsd
-; RUN: llc < %s -march=x86-64 | grep {movd.*%rdi,.*%xmm0}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; CHECK-NOT: movsd
+; CHECK: movd {{%rdi|%rcx}}, %xmm0
+; CHECK-NOT: movsd
define <2 x i64> @test(i64 %i) nounwind {
entry:
diff --git a/llvm/test/CodeGen/X86/vec_shuffle-17.ll b/llvm/test/CodeGen/X86/vec_shuffle-17.ll
index 9c33abb..ebc8c5b 100644
--- a/llvm/test/CodeGen/X86/vec_shuffle-17.ll
+++ b/llvm/test/CodeGen/X86/vec_shuffle-17.ll
@@ -1,5 +1,8 @@
-; RUN: llc < %s -march=x86-64 | grep {movd.*%rdi, %xmm0}
-; RUN: llc < %s -march=x86-64 | not grep xor
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; CHECK-NOT: xor
+; CHECK: movd {{%rdi|%rcx}}, %xmm0
+; CHECK-NOT: xor
; PR2108
define <2 x i64> @doload64(i64 %x) nounwind {
diff --git a/llvm/test/CodeGen/X86/widen_load-0.ll b/llvm/test/CodeGen/X86/widen_load-0.ll
index f6c4af0..82c8252 100644
--- a/llvm/test/CodeGen/X86/widen_load-0.ll
+++ b/llvm/test/CodeGen/X86/widen_load-0.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -o - -march=x86-64 | FileCheck %s
+; RUN: llc < %s -o - -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -o - -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; PR4891
; Both loads should happen before either store.
@@ -8,6 +9,11 @@
; CHECK: movl %ecx, (%rdi)
; CHECK: movl %eax, (%rsi)
+; WIN64: movl (%rcx), %eax
+; WIN64: movl (%rdx), %esi
+; WIN64: movl %esi, (%rcx)
+; WIN64: movl %eax, (%rdx)
+
define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
entry:
%0 = load <2 x i16>* %b, align 2 ; <<2 x i16>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/x86-64-malloc.ll b/llvm/test/CodeGen/X86/x86-64-malloc.ll
index b4f1fa6..4aa0ec3 100644
--- a/llvm/test/CodeGen/X86/x86-64-malloc.ll
+++ b/llvm/test/CodeGen/X86/x86-64-malloc.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86-64 | grep {shll.*3, %edi}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; CHECK: shll $3, {{%edi|%ecx}}
; PR3829
; The generated code should multiply by 3 (sizeof i8*) as an i32,
; not as an i64!
diff --git a/llvm/utils/lit/lit/TestingConfig.py b/llvm/utils/lit/lit/TestingConfig.py
index c7a03dd..25bb341 100644
--- a/llvm/utils/lit/lit/TestingConfig.py
+++ b/llvm/utils/lit/lit/TestingConfig.py
@@ -17,7 +17,6 @@ class TestingConfig:
'PATHEXT' : os.environ.get('PATHEXT',''),
'SYSTEMROOT' : os.environ.get('SYSTEMROOT',''),
'LLVM_DISABLE_CRT_DEBUG' : '1',
- 'PRINTF_EXPONENT_DIGITS' : '2',
'PYTHONUNBUFFERED' : '1',
}