author     H.J. Lu <hjl.tools@gmail.com>  2022-08-23 13:32:01 -0700
committer  H.J. Lu <hjl.tools@gmail.com>  2022-08-23 13:58:35 -0700
commit     4ade41de1a6c80db6bb61399da6bff2126813d75 (patch)
tree       780b62368117a5b71608320d75c6761210e63341
parent     c0dc8533675ca58a603833a3b4e072a82172748f (diff)
x86: Replace vmovdqu with movdqu in BF16 XMM ABI tests
Since XMM BF16 tests only require SSE2, replace vmovdqu with movdqu in
BF16 XMM ABI tests to support SSE2 machines without AVX.  Tested on
x86-64 machines with and without AVX.

	* gcc.target/x86_64/abi/bf16/asm-support.S: Replace vmovdqu with
	movdqu.
 gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)
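
Background on the fix: vmovdqu is the VEX-encoded AVX form of the unaligned
128-bit move, so it raises an invalid-opcode fault (#UD) on CPUs without AVX,
whereas movdqu is the legacy SSE2 encoding, and SSE2 is baseline on x86-64.
A minimal C sketch, not part of the patch, that reports which case a given
machine falls into; it assumes only GCC's __builtin_cpu_supports builtin:

/* Sketch only: probe at run time whether the old vmovdqu-based
   asm-support.S could have executed on this machine.  */
#include <stdio.h>

int
main (void)
{
  if (__builtin_cpu_supports ("avx"))
    printf ("AVX present: VEX-encoded vmovdqu would execute.\n");
  else
    printf ("No AVX: vmovdqu raises #UD; SSE2 movdqu still works.\n");
  return 0;
}
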
diff --git a/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S b/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S
index a8165d8..7559aa9 100644
--- a/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S
+++ b/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S
@@ -20,22 +20,22 @@ snapshot:
movq %r13, r13(%rip)
movq %r14, r14(%rip)
movq %r15, r15(%rip)
- vmovdqu %xmm0, xmm_regs+0(%rip)
- vmovdqu %xmm1, xmm_regs+16(%rip)
- vmovdqu %xmm2, xmm_regs+32(%rip)
- vmovdqu %xmm3, xmm_regs+48(%rip)
- vmovdqu %xmm4, xmm_regs+64(%rip)
- vmovdqu %xmm5, xmm_regs+80(%rip)
- vmovdqu %xmm6, xmm_regs+96(%rip)
- vmovdqu %xmm7, xmm_regs+112(%rip)
- vmovdqu %xmm8, xmm_regs+128(%rip)
- vmovdqu %xmm9, xmm_regs+144(%rip)
- vmovdqu %xmm10, xmm_regs+160(%rip)
- vmovdqu %xmm11, xmm_regs+176(%rip)
- vmovdqu %xmm12, xmm_regs+192(%rip)
- vmovdqu %xmm13, xmm_regs+208(%rip)
- vmovdqu %xmm14, xmm_regs+224(%rip)
- vmovdqu %xmm15, xmm_regs+240(%rip)
+ movdqu %xmm0, xmm_regs+0(%rip)
+ movdqu %xmm1, xmm_regs+16(%rip)
+ movdqu %xmm2, xmm_regs+32(%rip)
+ movdqu %xmm3, xmm_regs+48(%rip)
+ movdqu %xmm4, xmm_regs+64(%rip)
+ movdqu %xmm5, xmm_regs+80(%rip)
+ movdqu %xmm6, xmm_regs+96(%rip)
+ movdqu %xmm7, xmm_regs+112(%rip)
+ movdqu %xmm8, xmm_regs+128(%rip)
+ movdqu %xmm9, xmm_regs+144(%rip)
+ movdqu %xmm10, xmm_regs+160(%rip)
+ movdqu %xmm11, xmm_regs+176(%rip)
+ movdqu %xmm12, xmm_regs+192(%rip)
+ movdqu %xmm13, xmm_regs+208(%rip)
+ movdqu %xmm14, xmm_regs+224(%rip)
+ movdqu %xmm15, xmm_regs+240(%rip)
jmp *callthis(%rip)
.LFE3:
.size snapshot, .-snapshot
@@ -50,8 +50,8 @@ snapshot_ret:
addq $8, %rsp
movq %rax, rax(%rip)
movq %rdx, rdx(%rip)
- vmovdqu %xmm0, xmm_regs+0(%rip)
- vmovdqu %xmm1, xmm_regs+16(%rip)
+ movdqu %xmm0, xmm_regs+0(%rip)
+ movdqu %xmm1, xmm_regs+16(%rip)
fstpt x87_regs(%rip)
fstpt x87_regs+16(%rip)
fldt x87_regs+16(%rip)
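
For reference, the same unaligned 16-byte store that snapshot performs by hand
can be reached from C through the SSE2 intrinsic _mm_storeu_si128, and the
encoding the compiler picks then follows the enabled ISA, mirroring the
distinction this patch corrects.  A hedged sketch, with illustrative names not
taken from the testsuite:

/* Sketch only: an unaligned 16-byte store like the
   "movdqu %xmm0, xmm_regs+0(%rip)" stores above.  */
#include <emmintrin.h>   /* SSE2 intrinsics */
#include <stdio.h>

int
main (void)
{
  unsigned char buf[16];
  __m128i v = _mm_set1_epi8 (0x2a);       /* fill all 16 bytes with 42 */
  _mm_storeu_si128 ((__m128i *) buf, v);  /* unaligned 128-bit store   */
  printf ("buf[0] = %d\n", buf[0]);       /* prints 42 */
  return 0;
}

Built with plain gcc -O2 this compiles to a legacy-SSE unaligned store (movdqu,
or the equivalent movups depending on tuning); with gcc -O2 -mavx it becomes
the VEX form vmovdqu, which is exactly the encoding the BF16 ABI tests must
avoid on SSE2-only machines.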