Diffstat (limited to 'llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir')
-rw-r--r-- | llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir | 1589 |
1 file changed, 1589 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
new file mode 100644
index 0000000..3a2d40f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
@@ -0,0 +1,1589 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV32 %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV64 %s
+
+# Extend from s1 element vectors
+---
+name: anyext_nxv1i8_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v0
+    ; RV32-LABEL: name: anyext_nxv1i8_nxv1i1
+    ; RV32: liveins: $v0
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+    ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+    ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: anyext_nxv1i8_nxv1i1
+    ; RV64: liveins: $v0
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+    ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s1>) = COPY $v0
+    %0:_(<vscale x 1 x s8>) = G_ANYEXT %1(<vscale x 1 x s1>)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+--- +name: anyext_nxv1i16_nxv1i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv1i16_nxv1i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i16_nxv1i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s1>) = COPY $v0 + %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s1>) + $v8 = COPY %0(<vscale x 1 x s16>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv1i32_nxv1i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv1i32_nxv1i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i32_nxv1i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s1>) = COPY $v0 + %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s1>) + $v8 = COPY %0(<vscale x 1 x s32>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv1i64_nxv1i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv1i64_nxv1i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64) + ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV1]](s64) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i64_nxv1i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s1>) = COPY $v0 + %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s1>) + $v8 = COPY %0(<vscale x 1 x s64>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv2i8_nxv2i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv2i8_nxv2i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv2i8_nxv2i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 2 x s1>) = COPY $v0 + %0:_(<vscale x 2 x s8>) = G_ANYEXT %1(<vscale x 2 x s1>) + $v8 = COPY %0(<vscale x 2 x s8>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv2i16_nxv2i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv2i16_nxv2i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv2i16_nxv2i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 2 x s1>) = COPY $v0 + %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s1>) + $v8 = COPY %0(<vscale x 2 x s16>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv2i32_nxv2i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv2i32_nxv2i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv2i32_nxv2i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 2 x s1>) = COPY $v0 + %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s1>) + $v8 = COPY %0(<vscale x 2 x s32>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv2i64_nxv2i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv2i64_nxv2i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64) + ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV1]](s64) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv2i64_nxv2i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 2 x s1>) = COPY $v0 + 
%0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s1>) + $v8m2 = COPY %0(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 +... +--- +name: anyext_nxv4i8_nxv4i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv4i8_nxv4i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv4i8_nxv4i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 4 x s1>) = COPY $v0 + %0:_(<vscale x 4 x s8>) = G_ANYEXT %1(<vscale x 4 x s1>) + $v8 = COPY %0(<vscale x 4 x s8>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv4i16_nxv4i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv4i16_nxv4i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv4i16_nxv4i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 4 x s1>) = COPY $v0 + %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s1>) + $v8 = COPY %0(<vscale x 4 x s16>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv4i32_nxv4i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv4i32_nxv4i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv4i32_nxv4i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 4 x s1>) = COPY $v0 + %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s1>) + $v8m2 = COPY %0(<vscale x 4 x s32>) + PseudoRET implicit $v8m2 +... 
+--- +name: anyext_nxv4i64_nxv4i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv4i64_nxv4i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64) + ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV1]](s64) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: anyext_nxv4i64_nxv4i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + %1:_(<vscale x 4 x s1>) = COPY $v0 + %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s1>) + $v8m4 = COPY %0(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 +... 
+--- +name: anyext_nxv8i8_nxv8i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv8i8_nxv8i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv8i8_nxv8i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 8 x s1>) = COPY $v0 + %0:_(<vscale x 8 x s8>) = G_ANYEXT %1(<vscale x 8 x s1>) + $v8 = COPY %0(<vscale x 8 x s8>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv8i16_nxv8i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv8i16_nxv8i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv8i16_nxv8i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 8 x s1>) = COPY $v0 + %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s1>) + $v8m2 = COPY %0(<vscale x 8 x s16>) + PseudoRET implicit $v8m2 +... 
+--- +name: anyext_nxv8i32_nxv8i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv8i32_nxv8i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: anyext_nxv8i32_nxv8i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + %1:_(<vscale x 8 x s1>) = COPY $v0 + %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s1>) + $v8m4 = COPY %0(<vscale x 8 x s32>) + PseudoRET implicit $v8m4 +... +--- +name: anyext_nxv8i64_nxv8i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv8i64_nxv8i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64) + ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C3]](s32) + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV1]](s64) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: anyext_nxv8i64_nxv8i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + %1:_(<vscale x 8 x s1>) = 
COPY $v0 + %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s1>) + $v8m8 = COPY %0(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 +... +--- +name: anyext_nxv16i8_nxv16i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv16i8_nxv16i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv16i8_nxv16i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 16 x s1>) = COPY $v0 + %0:_(<vscale x 16 x s8>) = G_ANYEXT %1(<vscale x 16 x s1>) + $v8m2 = COPY %0(<vscale x 16 x s8>) + PseudoRET implicit $v8m2 +... 
+--- +name: anyext_nxv16i16_nxv16i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv16i16_nxv16i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: anyext_nxv16i16_nxv16i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + %1:_(<vscale x 16 x s1>) = COPY $v0 + %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s1>) + $v8m4 = COPY %0(<vscale x 16 x s16>) + PseudoRET implicit $v8m4 +... 
+--- +name: anyext_nxv16i32_nxv16i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv16i32_nxv16i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: anyext_nxv16i32_nxv16i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + %1:_(<vscale x 16 x s1>) = COPY $v0 + %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s1>) + $v8m8 = COPY %0(<vscale x 16 x s32>) + PseudoRET implicit $v8m8 +... 
+--- +name: anyext_nxv32i8_nxv32i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv32i8_nxv32i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: anyext_nxv32i8_nxv32i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + %1:_(<vscale x 32 x s1>) = COPY $v0 + %0:_(<vscale x 32 x s8>) = G_ANYEXT %1(<vscale x 32 x s1>) + $v8m4 = COPY %0(<vscale x 32 x s8>) + PseudoRET implicit $v8m4 +... 
+--- +name: anyext_nxv32i16_nxv32i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv32i16_nxv32i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: anyext_nxv32i16_nxv32i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + %1:_(<vscale x 32 x s1>) = COPY $v0 + %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s1>) + $v8m8 = COPY %0(<vscale x 32 x s16>) + PseudoRET implicit $v8m8 +... 
+--- +name: anyext_nxv64i8_nxv64i1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v0 + ; RV32-LABEL: name: anyext_nxv64i8_nxv64i1 + ; RV32: liveins: $v0 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0 + ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32) + ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32) + ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV32-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: anyext_nxv64i8_nxv64i1 + ; RV64: liveins: $v0 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0 + ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64) + ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32) + ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64) + ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]] + ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + %1:_(<vscale x 64 x s1>) = COPY $v0 + %0:_(<vscale x 64 x s8>) = G_ANYEXT %1(<vscale x 64 x s1>) + $v8m8 = COPY %0(<vscale x 64 x s8>) + PseudoRET implicit $v8m8 +... + +# Extend from s8 element vectors +--- +name: anyext_nxv1i16_nxv1i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv1i16_nxv1i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i16_nxv1i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s8>) = COPY $v8 + %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s8>) + $v8 = COPY %0(<vscale x 1 x s16>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv1i32_nxv1i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv1i32_nxv1i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i32_nxv1i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s8>) = COPY $v8 + %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s8>) + $v8 = COPY %0(<vscale x 1 x s32>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv1i64_nxv1i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv1i64_nxv1i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i64_nxv1i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s8>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s8>) = COPY $v8 + %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s8>) + $v8 = COPY %0(<vscale x 1 x s64>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv2i16_nxv2i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv2i16_nxv2i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv2i16_nxv2i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 2 x s8>) = COPY $v8 + %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s8>) + $v8 = COPY %0(<vscale x 2 x s16>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv2i32_nxv2i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv2i32_nxv2i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv2i32_nxv2i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 2 x s8>) = COPY $v8 + %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s8>) + $v8 = COPY %0(<vscale x 2 x s32>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv2i64_nxv2i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv2i64_nxv2i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>) + ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv2i64_nxv2i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s8>) + ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 2 x s8>) = COPY $v8 + %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s8>) + $v8m2 = COPY %0(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 +... +--- +name: anyext_nxv4i16_nxv4i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv4i16_nxv4i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv4i16_nxv4i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 4 x s8>) = COPY $v8 + %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s8>) + $v8 = COPY %0(<vscale x 4 x s16>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv4i32_nxv4i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv4i32_nxv4i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>) + ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv4i32_nxv4i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>) + ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 4 x s8>) = COPY $v8 + %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s8>) + $v8m2 = COPY %0(<vscale x 4 x s32>) + PseudoRET implicit $v8m2 +... +--- +name: anyext_nxv4i64_nxv4i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv4i64_nxv4i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>) + ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: anyext_nxv4i64_nxv4i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s8>) + ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + %1:_(<vscale x 4 x s8>) = COPY $v8 + %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s8>) + $v8m4 = COPY %0(<vscale x 4 x s64>) + PseudoRET implicit $v8m4 +... +--- +name: anyext_nxv8i16_nxv8i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv8i16_nxv8i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>) + ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv8i16_nxv8i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>) + ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 8 x s8>) = COPY $v8 + %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s8>) + $v8m2 = COPY %0(<vscale x 8 x s16>) + PseudoRET implicit $v8m2 +... 
+--- +name: anyext_nxv8i32_nxv8i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv8i32_nxv8i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>) + ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: anyext_nxv8i32_nxv8i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>) + ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + %1:_(<vscale x 8 x s8>) = COPY $v8 + %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s8>) + $v8m4 = COPY %0(<vscale x 8 x s32>) + PseudoRET implicit $v8m4 +... +--- +name: anyext_nxv8i64_nxv8i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv8i64_nxv8i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>) + ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: anyext_nxv8i64_nxv8i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s8>) + ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + %1:_(<vscale x 8 x s8>) = COPY $v8 + %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s8>) + $v8m8 = COPY %0(<vscale x 8 x s64>) + PseudoRET implicit $v8m8 +... +--- +name: anyext_nxv16i16_nxv16i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv16i16_nxv16i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>) + ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m4 + ; + ; RV64-LABEL: name: anyext_nxv16i16_nxv16i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>) + ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m4 + %1:_(<vscale x 16 x s8>) = COPY $v8m2 + %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s8>) + $v8m4 = COPY %0(<vscale x 16 x s16>) + PseudoRET implicit $v8m4 +... 
+--- +name: anyext_nxv16i32_nxv16i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv16i32_nxv16i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m4 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>) + ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: anyext_nxv16i32_nxv16i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m4 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s8>) + ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + %1:_(<vscale x 16 x s8>) = COPY $v8m4 + %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s8>) + $v8m8 = COPY %0(<vscale x 16 x s32>) + PseudoRET implicit $v8m8 +... +--- +name: anyext_nxv32i16_nxv32i8 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv32i16_nxv32i8 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ANYEXT [[COPY]](<vscale x 32 x s8>) + ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>) + ; RV32-NEXT: PseudoRET implicit $v8m8 + ; + ; RV64-LABEL: name: anyext_nxv32i16_nxv32i8 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ANYEXT [[COPY]](<vscale x 32 x s8>) + ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>) + ; RV64-NEXT: PseudoRET implicit $v8m8 + %1:_(<vscale x 32 x s8>) = COPY $v8m4 + %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s8>) + $v8m8 = COPY %0(<vscale x 32 x s16>) + PseudoRET implicit $v8m8 +... + +# Extend from s16 element vectors +--- +name: anyext_nxv1i32_nxv1i16 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv1i32_nxv1i16 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i32_nxv1i16 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s16>) = COPY $v8 + %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s16>) + $v8 = COPY %0(<vscale x 1 x s32>) + PseudoRET implicit $v8 +... 
+--- +name: anyext_nxv1i64_nxv1i16 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv1i64_nxv1i16 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv1i64_nxv1i16 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s16>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 1 x s16>) = COPY $v8 + %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s16>) + $v8 = COPY %0(<vscale x 1 x s64>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv2i32_nxv2i16 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv2i32_nxv2i16 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>) + ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>) + ; RV32-NEXT: PseudoRET implicit $v8 + ; + ; RV64-LABEL: name: anyext_nxv2i32_nxv2i16 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>) + ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>) + ; RV64-NEXT: PseudoRET implicit $v8 + %1:_(<vscale x 2 x s16>) = COPY $v8 + %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s16>) + $v8 = COPY %0(<vscale x 2 x s32>) + PseudoRET implicit $v8 +... +--- +name: anyext_nxv2i64_nxv2i16 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $v8 + ; RV32-LABEL: name: anyext_nxv2i64_nxv2i16 + ; RV32: liveins: $v8 + ; RV32-NEXT: {{ $}} + ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8 + ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>) + ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>) + ; RV32-NEXT: PseudoRET implicit $v8m2 + ; + ; RV64-LABEL: name: anyext_nxv2i64_nxv2i16 + ; RV64: liveins: $v8 + ; RV64-NEXT: {{ $}} + ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8 + ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s16>) + ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>) + ; RV64-NEXT: PseudoRET implicit $v8m2 + %1:_(<vscale x 2 x s16>) = COPY $v8 + %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s16>) + $v8m2 = COPY %0(<vscale x 2 x s64>) + PseudoRET implicit $v8m2 +... 
+---
+name: anyext_nxv4i32_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv4i32_nxv4i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: anyext_nxv4i32_nxv4i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s16>)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv4i64_nxv4i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+    ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: anyext_nxv4i64_nxv4i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s16>)
+    ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s16>)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i32_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv8i32_nxv8i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: anyext_nxv8i32_nxv8i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 8 x s16>) = COPY $v8m2
+    %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s16>)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv8i64_nxv8i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+    ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: anyext_nxv8i64_nxv8i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s16>)
+    ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 8 x s16>) = COPY $v8m2
+    %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s16>)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i32_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv16i32_nxv16i16
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s16>)
+    ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: anyext_nxv16i32_nxv16i16
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ANYEXT [[COPY]](<vscale x 16 x s16>)
+    ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 16 x s16>) = COPY $v8m4
+    %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s16>)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: anyext_nxv1i64_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv1i64_nxv1i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s32>)
+    ; RV32-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64-LABEL: name: anyext_nxv1i64_nxv1i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ANYEXT [[COPY]](<vscale x 1 x s32>)
+    ; RV64-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s32>) = COPY $v8
+    %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s32>)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv2i64_nxv2i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s32>)
+    ; RV32-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64-LABEL: name: anyext_nxv2i64_nxv2i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ANYEXT [[COPY]](<vscale x 2 x s32>)
+    ; RV64-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s32>)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv4i64_nxv4i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s32>)
+    ; RV32-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64-LABEL: name: anyext_nxv4i64_nxv4i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ANYEXT [[COPY]](<vscale x 4 x s32>)
+    ; RV64-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 4 x s32>) = COPY $v8m2
+    %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s32>)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8
+    ; RV32-LABEL: name: anyext_nxv8i64_nxv8i32
+    ; RV32: liveins: $v8
+    ; RV32-NEXT: {{ $}}
+    ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s32>)
+    ; RV32-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+    ; RV32-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64-LABEL: name: anyext_nxv8i64_nxv8i32
+    ; RV64: liveins: $v8
+    ; RV64-NEXT: {{ $}}
+    ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ANYEXT [[COPY]](<vscale x 8 x s32>)
+    ; RV64-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+    ; RV64-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 8 x s32>) = COPY $v8m4
+    %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s32>)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...