about · summary · refs · log · tree · commit · diff
path: root/llvm/lib
diff options
context:
space:
mode:
author: Craig Topper <craig.topper@intel.com> 2017-09-07 05:08:16 +0000
committer: Craig Topper <craig.topper@intel.com> 2017-09-07 05:08:16 +0000
commit: 9228aee711bdeb5b4f43073bbfc1763d860e3d0b (patch)
tree: 03cdcc85f899a54db1275da495918989e076c6fb /llvm/lib
parent: 064028bb0546e42aa9643000a21d6e900bbadfbe (diff)
download: llvm-9228aee711bdeb5b4f43073bbfc1763d860e3d0b.zip
download: llvm-9228aee711bdeb5b4f43073bbfc1763d860e3d0b.tar.gz
download: llvm-9228aee711bdeb5b4f43073bbfc1763d860e3d0b.tar.bz2
[X86] Remove patterns for selecting a v8f32 X86ISD::MOVSS or v4f64 X86ISD::MOVSD.
I don't think we ever generate these. If we did, I would expect we would also be able to generate v16f32 and v8f64, but we don't have those patterns.

llvm-svn: 312694
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/Target/X86/X86InstrAVX512.td24
-rw-r--r--llvm/lib/Target/X86/X86InstrSSE.td24
2 files changed, 0 insertions, 48 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 80b92d5..632fd3f 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -4283,36 +4283,12 @@ let Predicates = [HasAVX512] in {
(VMOVSSZrr (v4f32 VR128X:$src1),
(COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
- // 256-bit variants
- def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
- (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
- sub_xmm)>;
- def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
- (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
- sub_xmm)>;
-
// Shuffle with VMOVSD
def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
(VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
(VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
- // 256-bit variants
- def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
- (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
- sub_xmm)>;
- def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
- (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
- sub_xmm)>;
-
def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
(VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index c852aa8..9fa3124 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -512,36 +512,12 @@ let Predicates = [UseAVX] in {
(VMOVSSrr (v4f32 VR128:$src1),
(COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;
- // 256-bit variants
- def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
- (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
- sub_xmm)>;
- def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
- (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
- sub_xmm)>;
-
// Shuffle with VMOVSD
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
(VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
(VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
- // 256-bit variants
- def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
- (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
- sub_xmm)>;
- def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
- (SUBREG_TO_REG (i32 0),
- (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
- (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
- sub_xmm)>;
-
// FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
// is during lowering, where it's not possible to recognize the fold cause
// it has two uses through a bitcast. One use disappears at isel time and the