Diffstat (limited to 'llvm/test/CodeGen')
259 files changed, 28514 insertions, 14833 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir new file mode 100644 index 0000000..824ada1 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-add.mir @@ -0,0 +1,278 @@ +# NOTE: Assertions have been autogenerated by utils/update_givaluetracking_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -mtriple=aarch64 -passes="print<gisel-value-tracking>" -filetype=null %s 2>&1 | FileCheck %s + +--- +name: Cst +body: | + bb.1: + ; CHECK-LABEL: name: @Cst + ; CHECK-NEXT: %0:_ KnownBits:00000010 SignBits:6 + ; CHECK-NEXT: %1:_ KnownBits:00011000 SignBits:3 + ; CHECK-NEXT: %2:_ KnownBits:00011010 SignBits:3 + %0:_(s8) = G_CONSTANT i8 2 + %1:_(s8) = G_CONSTANT i8 24 + %2:_(s8) = G_ADD %0, %1 +... +--- +name: CstZero +body: | + bb.1: + ; CHECK-LABEL: name: @CstZero + ; CHECK-NEXT: %0:_ KnownBits:00000001 SignBits:7 + ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:00000000 SignBits:8 + %0:_(s8) = G_CONSTANT i8 1 + %1:_(s8) = G_CONSTANT i8 255 + %2:_(s8) = G_ADD %0, %1 +... +--- +name: CstNegOne +body: | + bb.1: + ; CHECK-LABEL: name: @CstNegOne + ; CHECK-NEXT: %0:_ KnownBits:00000000 SignBits:8 + ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:11111111 SignBits:8 + %0:_(s8) = G_CONSTANT i8 0 + %1:_(s8) = G_CONSTANT i8 255 + %2:_(s8) = G_ADD %0, %1 +... +--- +name: CstSeven +body: | + bb.1: + ; CHECK-LABEL: name: @CstSeven + ; CHECK-NEXT: %0:_ KnownBits:00001000 SignBits:4 + ; CHECK-NEXT: %1:_ KnownBits:11111111 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:00000111 SignBits:5 + %0:_(s8) = G_CONSTANT i8 8 + %1:_(s8) = G_CONSTANT i8 255 + %2:_(s8) = G_ADD %0, %1 +... +--- +name: CstNeg +body: | + bb.1: + ; CHECK-LABEL: name: @CstNeg + ; CHECK-NEXT: %0:_ KnownBits:11100000 SignBits:3 + ; CHECK-NEXT: %1:_ KnownBits:00000010 SignBits:6 + ; CHECK-NEXT: %2:_ KnownBits:11100010 SignBits:3 + %0:_(s8) = G_CONSTANT i8 224 + %1:_(s8) = G_CONSTANT i8 2 + %2:_(s8) = G_ADD %0, %1 +... +--- +name: ScalarVar +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarVar + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1 + %0:_(s8) = COPY $b0 + %1:_(s8) = COPY $b1 + %2:_(s8) = G_ADD %0, %1 +... +--- +name: ScalarRhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarRhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6 + ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 3 + %2:_(s8) = G_ADD %0, %1 +... +--- +name: ScalarNonNegative +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarNonNegative + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4 + ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4 + ; CHECK-NEXT: %3:_ KnownBits:11111111 SignBits:8 + ; CHECK-NEXT: %4:_ KnownBits:???????? SignBits:4 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 15 + %2:_(s8) = G_AND %0, %1 + %3:_(s8) = G_CONSTANT i8 255 + %4:_(s8) = G_ADD %2, %3 +... +--- +name: ScalarLhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarLhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6 + ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 3 + %2:_(s8) = G_ADD %1, %0 +... 
+--- +name: ScalarPartKnown +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarPartKnown + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4 + ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4 + ; CHECK-NEXT: %3:_ KnownBits:00000101 SignBits:5 + ; CHECK-NEXT: %4:_ KnownBits:000????? SignBits:3 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 15 + %2:_(s8) = G_AND %0, %1 + %3:_(s8) = G_CONSTANT i8 5 + %4:_(s8) = G_ADD %2, %3 +... +--- +name: VectorCstZero +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCstZero + ; CHECK-NEXT: %0:_ KnownBits:0000000000000001 SignBits:15 + ; CHECK-NEXT: %1:_ KnownBits:1111111111111111 SignBits:16 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000001 SignBits:15 + ; CHECK-NEXT: %3:_ KnownBits:1111111111111111 SignBits:16 + ; CHECK-NEXT: %4:_ KnownBits:0000000000000000 SignBits:16 + %0:_(s16) = G_CONSTANT i16 1 + %1:_(s16) = G_CONSTANT i16 65535 + %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0 + %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %4:_(<4 x s16>) = G_ADD %2, %3 +... +--- +name: VectorCstNegOne +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCstNegOne + ; CHECK-NEXT: %0:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %1:_ KnownBits:1111111111111111 SignBits:16 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %3:_ KnownBits:1111111111111111 SignBits:16 + ; CHECK-NEXT: %4:_ KnownBits:1111111111111111 SignBits:16 + %0:_(s16) = G_CONSTANT i16 0 + %1:_(s16) = G_CONSTANT i16 65535 + %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0 + %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %4:_(<4 x s16>) = G_ADD %2, %3 +... +--- +name: VectorVar +body: | + bb.1: + ; CHECK-LABEL: name: @VectorVar + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %2:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(<4 x s16>) = COPY $d1 + %2:_(<4 x s16>) = G_ADD %0, %1 +... +--- +name: VectorRhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @VectorRhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 3 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_ADD %2, %0 +... +--- +name: VectorNonNegative +body: | + bb.1: + ; CHECK-LABEL: name: @VectorNonNegative + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8 + ; CHECK-NEXT: %4:_ KnownBits:1111111111111111 SignBits:16 + ; CHECK-NEXT: %5:_ KnownBits:1111111111111111 SignBits:16 + ; CHECK-NEXT: %6:_ KnownBits:???????????????? SignBits:8 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 255 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_AND %0, %2 + %4:_(s16) = G_CONSTANT i16 65535 + %5:_(<4 x s16>) = G_BUILD_VECTOR %4, %4, %4, %4 + %6:_(<4 x s16>) = G_ADD %3, %5 +... +--- +name: VectorLhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @VectorLhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????????????? 
SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 3 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_ADD %0, %2 +... +--- +name: VectorPartKnown +body: | + bb.1: + ; CHECK-LABEL: name: @VectorPartKnown + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8 + ; CHECK-NEXT: %4:_ KnownBits:0000000000101010 SignBits:10 + ; CHECK-NEXT: %5:_ KnownBits:0000000001001010 SignBits:9 + ; CHECK-NEXT: %6:_ KnownBits:000000000??01010 SignBits:9 + ; CHECK-NEXT: %7:_ KnownBits:0000000????????? SignBits:7 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 255 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_AND %0, %2 + %4:_(s16) = G_CONSTANT i16 42 + %5:_(s16) = G_CONSTANT i16 74 + %6:_(<4 x s16>) = G_BUILD_VECTOR %4, %5, %5, %4 + %7:_(<4 x s16>) = G_ADD %6, %3 +... +--- +name: VectorCst36 +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCst36 + ; CHECK-NEXT: %0:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000110 SignBits:13 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000?1? SignBits:13 + ; CHECK-NEXT: %3:_ KnownBits:0000000000000?1? SignBits:13 + ; CHECK-NEXT: %4:_ KnownBits:000000000000???? SignBits:12 + %0:_(s16) = G_CONSTANT i16 3 + %1:_(s16) = G_CONSTANT i16 6 + %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0 + %3:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0 + %4:_(<4 x s16>) = G_ADD %2, %3 +... + +--- +name: VectorCst3unknown +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCst3unknown + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %4:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = COPY $h0 + %2:_(s16) = G_CONSTANT i16 3 + %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %2, %2, %1 + %4:_(<4 x s16>) = G_ADD %0, %3 +... diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-ashr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-ashr.mir index 8552931..ee35447 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-ashr.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-ashr.mir @@ -102,8 +102,8 @@ body: | ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 ; CHECK-NEXT: %4:_ KnownBits:???????????????? SignBits:1 %0:_(<4 x s16>) = COPY $d0 - %2:_(s16) = COPY $h0 - %1:_(s16) = G_CONSTANT i16 3 + %1:_(s16) = COPY $h0 + %2:_(s16) = G_CONSTANT i16 3 %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %2, %2, %1 %4:_(<4 x s16>) = G_ASHR %0, %3 ... diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-shl.mir b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-shl.mir index 61d1c43..97bcb80 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-shl.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-shl.mir @@ -135,8 +135,8 @@ body: | ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 ; CHECK-NEXT: %4:_ KnownBits:???????????????? 
SignBits:1 %0:_(<4 x s16>) = COPY $d0 - %2:_(s16) = COPY $h0 - %1:_(s16) = G_CONSTANT i16 3 + %1:_(s16) = COPY $h0 + %2:_(s16) = G_CONSTANT i16 3 %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %2, %2, %1 %4:_(<4 x s16>) = G_SHL %0, %3 ... diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-sub.mir b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-sub.mir new file mode 100644 index 0000000..332049d --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/knownbits-sub.mir @@ -0,0 +1,276 @@ +# NOTE: Assertions have been autogenerated by utils/update_givaluetracking_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -mtriple=aarch64 -passes="print<gisel-value-tracking>" -filetype=null %s 2>&1 | FileCheck %s + +--- +name: Cst +body: | + bb.1: + ; CHECK-LABEL: name: @Cst + ; CHECK-NEXT: %0:_ KnownBits:00000010 SignBits:6 + ; CHECK-NEXT: %1:_ KnownBits:11100000 SignBits:3 + ; CHECK-NEXT: %2:_ KnownBits:00100010 SignBits:2 + %0:_(s8) = G_CONSTANT i8 2 + %1:_(s8) = G_CONSTANT i8 224 + %2:_(s8) = G_SUB %0, %1 +... +--- +name: CstZero +body: | + bb.1: + ; CHECK-LABEL: name: @CstZero + ; CHECK-NEXT: %0:_ KnownBits:00000000 SignBits:8 + ; CHECK-NEXT: %1:_ KnownBits:00000000 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:00000000 SignBits:8 + %0:_(s8) = G_CONSTANT i8 0 + %1:_(s8) = G_CONSTANT i8 0 + %2:_(s8) = G_SUB %0, %1 +... +--- +name: CstNegOne +body: | + bb.1: + ; CHECK-LABEL: name: @CstNegOne + ; CHECK-NEXT: %0:_ KnownBits:00000000 SignBits:8 + ; CHECK-NEXT: %1:_ KnownBits:00000001 SignBits:7 + ; CHECK-NEXT: %2:_ KnownBits:11111111 SignBits:8 + %0:_(s8) = G_CONSTANT i8 0 + %1:_(s8) = G_CONSTANT i8 1 + %2:_(s8) = G_SUB %0, %1 +... +--- +name: CstNegFour +body: | + bb.1: + ; CHECK-LABEL: name: @CstNegFour + ; CHECK-NEXT: %0:_ KnownBits:00000000 SignBits:8 + ; CHECK-NEXT: %1:_ KnownBits:00000100 SignBits:5 + ; CHECK-NEXT: %2:_ KnownBits:11111100 SignBits:6 + %0:_(s8) = G_CONSTANT i8 0 + %1:_(s8) = G_CONSTANT i8 4 + %2:_(s8) = G_SUB %0, %1 +... +--- +name: CstNeg +body: | + bb.1: + ; CHECK-LABEL: name: @CstNeg + ; CHECK-NEXT: %0:_ KnownBits:11100000 SignBits:3 + ; CHECK-NEXT: %1:_ KnownBits:00000010 SignBits:6 + ; CHECK-NEXT: %2:_ KnownBits:11011110 SignBits:2 + %0:_(s8) = G_CONSTANT i8 224 + %1:_(s8) = G_CONSTANT i8 2 + %2:_(s8) = G_SUB %0, %1 +... +--- +name: ScalarVar +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarVar + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1 + %0:_(s8) = COPY $b0 + %1:_(s8) = COPY $b1 + %2:_(s8) = G_SUB %0, %1 +... +--- +name: ScalarRhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarRhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6 + ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 3 + %2:_(s8) = G_SUB %0, %1 +... +--- +name: ScalarNonNegative +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarNonNegative + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4 + ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4 + ; CHECK-NEXT: %3:_ KnownBits:00000000 SignBits:8 + ; CHECK-NEXT: %4:_ KnownBits:???????? SignBits:4 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 15 + %2:_(s8) = G_AND %0, %1 + %3:_(s8) = G_CONSTANT i8 0 + %4:_(s8) = G_SUB %3, %2 +... +--- +name: ScalarLhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarLhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????? 
SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00000011 SignBits:6 + ; CHECK-NEXT: %2:_ KnownBits:???????? SignBits:1 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 3 + %2:_(s8) = G_SUB %1, %0 +... +--- +name: ScalarPartKnown +body: | + bb.1: + ; CHECK-LABEL: name: @ScalarPartKnown + ; CHECK-NEXT: %0:_ KnownBits:???????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:00001111 SignBits:4 + ; CHECK-NEXT: %2:_ KnownBits:0000???? SignBits:4 + ; CHECK-NEXT: %3:_ KnownBits:00000101 SignBits:5 + ; CHECK-NEXT: %4:_ KnownBits:???????? SignBits:3 + %0:_(s8) = COPY $b0 + %1:_(s8) = G_CONSTANT i8 15 + %2:_(s8) = G_AND %0, %1 + %3:_(s8) = G_CONSTANT i8 5 + %4:_(s8) = G_SUB %2, %3 +... +--- +name: VectorCstZero +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCstZero + ; CHECK-NEXT: %0:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %3:_ KnownBits:0000000000000000 SignBits:16 + %0:_(s16) = G_CONSTANT i16 0 + %1:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0 + %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0 + %3:_(<4 x s16>) = G_SUB %1, %2 +... +--- +name: VectorCstNegOne +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCstNegOne + ; CHECK-NEXT: %0:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000001 SignBits:15 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %3:_ KnownBits:0000000000000001 SignBits:15 + ; CHECK-NEXT: %4:_ KnownBits:1111111111111111 SignBits:16 + %0:_(s16) = G_CONSTANT i16 0 + %1:_(s16) = G_CONSTANT i16 1 + %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %0, %0, %0 + %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %4:_(<4 x s16>) = G_SUB %2, %3 +... +--- +name: VectorVar +body: | + bb.1: + ; CHECK-LABEL: name: @VectorVar + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %2:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(<4 x s16>) = COPY $d1 + %2:_(<4 x s16>) = G_SUB %0, %1 +... +--- +name: VectorRhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @VectorRhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 3 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_SUB %2, %0 +... +--- +name: VectorNonNegative +body: | + bb.1: + ; CHECK-LABEL: name: @VectorNonNegative + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8 + ; CHECK-NEXT: %4:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %5:_ KnownBits:0000000000000000 SignBits:16 + ; CHECK-NEXT: %6:_ KnownBits:???????????????? SignBits:8 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 255 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_AND %0, %2 + %4:_(s16) = G_CONSTANT i16 0 + %5:_(<4 x s16>) = G_BUILD_VECTOR %4, %4, %4, %4 + %6:_(<4 x s16>) = G_SUB %5, %3 +... +--- +name: VectorLhsEarlyOut +body: | + bb.1: + ; CHECK-LABEL: name: @VectorLhsEarlyOut + ; CHECK-NEXT: %0:_ KnownBits:???????????????? 
SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 3 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_SUB %0, %2 +... +--- +name: VectorPartKnown +body: | + bb.1: + ; CHECK-LABEL: name: @VectorPartKnown + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %2:_ KnownBits:0000000011111111 SignBits:8 + ; CHECK-NEXT: %3:_ KnownBits:00000000???????? SignBits:8 + ; CHECK-NEXT: %4:_ KnownBits:0000000000101010 SignBits:10 + ; CHECK-NEXT: %5:_ KnownBits:0000000001001010 SignBits:9 + ; CHECK-NEXT: %6:_ KnownBits:000000000??01010 SignBits:9 + ; CHECK-NEXT: %7:_ KnownBits:???????????????? SignBits:7 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = G_CONSTANT i16 255 + %2:_(<4 x s16>) = G_BUILD_VECTOR %1, %1, %1, %1 + %3:_(<4 x s16>) = G_AND %0, %2 + %4:_(s16) = G_CONSTANT i16 42 + %5:_(s16) = G_CONSTANT i16 74 + %6:_(<4 x s16>) = G_BUILD_VECTOR %4, %5, %5, %4 + %7:_(<4 x s16>) = G_SUB %6, %3 +... +--- +name: VectorCst36 +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCst36 + ; CHECK-NEXT: %0:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %1:_ KnownBits:0000000000000110 SignBits:13 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000?1? SignBits:13 + ; CHECK-NEXT: %3:_ KnownBits:0000000000000?1? SignBits:13 + ; CHECK-NEXT: %4:_ KnownBits:???????????????? SignBits:12 + %0:_(s16) = G_CONSTANT i16 3 + %1:_(s16) = G_CONSTANT i16 6 + %2:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0 + %3:_(<4 x s16>) = G_BUILD_VECTOR %0, %1, %1, %0 + %4:_(<4 x s16>) = G_SUB %2, %3 +... + +--- +name: VectorCst3unknown +body: | + bb.1: + ; CHECK-LABEL: name: @VectorCst3unknown + ; CHECK-NEXT: %0:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %1:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %2:_ KnownBits:0000000000000011 SignBits:14 + ; CHECK-NEXT: %3:_ KnownBits:???????????????? SignBits:1 + ; CHECK-NEXT: %4:_ KnownBits:???????????????? SignBits:1 + %0:_(<4 x s16>) = COPY $d0 + %1:_(s16) = COPY $h0 + %2:_(s16) = G_CONSTANT i16 3 + %3:_(<4 x s16>) = G_BUILD_VECTOR %1, %2, %2, %1 + %4:_(<4 x s16>) = G_SUB %0, %3 +... 
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir index cc75774..c2bf95c 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-compress.mir @@ -15,8 +15,9 @@ body: | ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s32>), [[C1]](s64) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[C1]](s64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C1]], [[C2]] + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY2]], [[C2]] ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[MUL]](s64) ; CHECK-NEXT: G_STORE [[EVEC]](s32), [[PTR_ADD]](p0) :: (store (s32)) ; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY1]](<4 x s16>), [[C1]](s64) @@ -91,7 +92,8 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32)) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s32>), [[C3]](s64) - ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C3]], [[C2]] + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[C3]](s64) + ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[COPY3]], [[C2]] ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[MUL1]](s64) ; CHECK-NEXT: G_STORE [[EVEC]](s32), [[PTR_ADD1]](p0) :: (store (s32)) ; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY1]](<4 x s16>), [[C3]](s64) diff --git a/llvm/test/CodeGen/AArch64/aarch64-matmul.ll b/llvm/test/CodeGen/AArch64/aarch64-matmul.ll index 649d0a9..e7e9ee7 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-matmul.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-matmul.ll @@ -1,41 +1,54 @@ -; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon,+i8mm < %s -o -| FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+neon,+i8mm < %s | FileCheck %s +; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+neon,+i8mm -global-isel < %s | FileCheck %s define <4 x i32> @smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: smmla.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: smmla v0.4s, v1.16b, v2.16b +; CHECK-NEXT: ret entry: -; CHECK-LABEL: smmla.v4i32.v16i8 -; CHECK: smmla v0.4s, v1.16b, v2.16b %vmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) ret <4 x i32> %vmmla1.i } define <4 x i32> @ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: ummla.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ummla v0.4s, v1.16b, v2.16b +; CHECK-NEXT: ret entry: -; CHECK-LABEL: ummla.v4i32.v16i8 -; CHECK: ummla v0.4s, v1.16b, v2.16b %vmmla1.i = tail call <4 x i32> @llvm.aarch64.neon.ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) ret <4 x i32> %vmmla1.i } define <4 x i32> @usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: usmmla.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: usmmla v0.4s, v1.16b, v2.16b +; CHECK-NEXT: ret entry: -; CHECK-LABEL: usmmla.v4i32.v16i8 -; CHECK: usmmla v0.4s, v1.16b, v2.16b %vusmmla1.i = tail call <4 x i32> 
@llvm.aarch64.neon.usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3 ret <4 x i32> %vusmmla1.i } define <2 x i32> @usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: usdot.v2i32.v8i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: usdot v0.2s, v1.8b, v2.8b +; CHECK-NEXT: ret entry: -; CHECK-LABEL: usdot.v2i32.v8i8 -; CHECK: usdot v0.2s, v1.8b, v2.8b %vusdot1.i = tail call <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) ret <2 x i32> %vusdot1.i } define <2 x i32> @usdot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: usdot_lane.v2i32.v8i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-NEXT: usdot v0.2s, v1.8b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: usdot_lane.v2i32.v8i8 -; CHECK: usdot v0.2s, v1.8b, v2.4b[0] %0 = bitcast <8 x i8> %b to <2 x i32> %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer %1 = bitcast <2 x i32> %shuffle to <8 x i8> @@ -44,9 +57,12 @@ entry: } define <2 x i32> @sudot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: sudot_lane.v2i32.v8i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-NEXT: sudot v0.2s, v1.8b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: sudot_lane.v2i32.v8i8 -; CHECK: sudot v0.2s, v1.8b, v2.4b[0] %0 = bitcast <8 x i8> %b to <2 x i32> %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer %1 = bitcast <2 x i32> %shuffle to <8 x i8> @@ -55,9 +71,11 @@ entry: } define <2 x i32> @usdot_lane.v2i32.v16i8(<2 x i32> %r, <8 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: usdot_lane.v2i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: usdot v0.2s, v1.8b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: usdot_lane.v2i32.v16i8 -; CHECK: usdot v0.2s, v1.8b, v2.4b[0] %0 = bitcast <16 x i8> %b to <4 x i32> %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> zeroinitializer %1 = bitcast <2 x i32> %shuffle to <8 x i8> @@ -66,9 +84,11 @@ entry: } define <2 x i32> @sudot_lane.v2i32.v16i8(<2 x i32> %r, <8 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: sudot_lane.v2i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: sudot v0.2s, v1.8b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: sudot_lane.v2i32.v16i8 -; CHECK: sudot v0.2s, v1.8b, v2.4b[0] %0 = bitcast <16 x i8> %b to <4 x i32> %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> zeroinitializer %1 = bitcast <2 x i32> %shuffle to <8 x i8> @@ -77,17 +97,22 @@ entry: } define <4 x i32> @usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: usdot.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: usdot v0.4s, v1.16b, v2.16b +; CHECK-NEXT: ret entry: -; CHECK-LABEL: usdot.v4i32.v16i8 -; CHECK: usdot v0.4s, v1.16b, v2.16b %vusdot1.i = tail call <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3 ret <4 x i32> %vusdot1.i } define <4 x i32> @usdot_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: usdot_lane.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-NEXT: usdot v0.4s, v1.16b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: usdot_lane.v4i32.v16i8 -; CHECK: usdot v0.4s, v1.16b, v2.4b[0] %0 = bitcast <8 x i8> %b to <2 x i32> %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer %1 = bitcast <4 x i32> %shuffle to <16 x i8> @@ -96,9 
+121,12 @@ entry: } define <4 x i32> @sudot_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: sudot_lane.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-NEXT: sudot v0.4s, v1.16b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: sudot_lane.v4i32.v16i8 -; CHECK: sudot v0.4s, v1.16b, v2.4b[0] %0 = bitcast <8 x i8> %b to <2 x i32> %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer %1 = bitcast <4 x i32> %shuffle to <16 x i8> @@ -107,9 +135,11 @@ entry: } define <4 x i32> @usdot_laneq.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: usdot_laneq.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: usdot v0.4s, v1.16b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: usdot_laneq.v4i32.v16i8 -; CHECK: usdot v0.4s, v1.16b, v2.4b[0] %0 = bitcast <16 x i8> %b to <4 x i32> %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer %1 = bitcast <4 x i32> %shuffle to <16 x i8> @@ -118,9 +148,11 @@ entry: } define <4 x i32> @sudot_laneq.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: sudot_laneq.v4i32.v16i8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: sudot v0.4s, v1.16b, v2.4b[0] +; CHECK-NEXT: ret entry: -; CHECK-LABEL: sudot_laneq.v4i32.v16i8 -; CHECK: sudot v0.4s, v1.16b, v2.4b[0] %0 = bitcast <16 x i8> %b to <4 x i32> %shuffle = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer %1 = bitcast <4 x i32> %shuffle to <16 x i8> @@ -133,4 +165,3 @@ declare <4 x i32> @llvm.aarch64.neon.ummla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 declare <4 x i32> @llvm.aarch64.neon.usmmla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2 declare <2 x i32> @llvm.aarch64.neon.usdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>) #2 declare <4 x i32> @llvm.aarch64.neon.usdot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2 - diff --git a/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir b/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir new file mode 100644 index 0000000..6540160 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/aarch64-post-coalescer.mir @@ -0,0 +1,16 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=aarch64 -mattr=+sme -run-pass=aarch64-post-coalescer-pass -o - %s | FileCheck %s + +--- +name: foo +machineFunctionInfo: + hasStreamingModeChanges: true +body: | + bb.0.entry: + ; CHECK-LABEL: name: foo + ; CHECK: $d0 = COPY undef %0:fpr64 + ; CHECK-NEXT: FAKE_USE implicit $d0 + %1:fpr64 = COALESCER_BARRIER_FPR64 undef %1 + $d0 = COPY %1 + FAKE_USE implicit $d0 +... 
diff --git a/llvm/test/CodeGen/AArch64/adds_cmn.ll b/llvm/test/CodeGen/AArch64/adds_cmn.ll index aa070b7..9b456a5 100644 --- a/llvm/test/CodeGen/AArch64/adds_cmn.ll +++ b/llvm/test/CodeGen/AArch64/adds_cmn.ll @@ -22,10 +22,8 @@ entry: define { i32, i32 } @adds_cmn_c(i32 noundef %x, i32 noundef %y) { ; CHECK-LABEL: adds_cmn_c: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn w0, w1 -; CHECK-NEXT: add w1, w1, w0 -; CHECK-NEXT: cset w8, lo -; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: adds w1, w0, w1 +; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret entry: %0 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) diff --git a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov-fpr.ll b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmove-fpr.ll index a0f1b71..bb362d2 100644 --- a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov-fpr.ll +++ b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmove-fpr.ll @@ -4,7 +4,7 @@ ; RUN: llc < %s -mtriple=arm64-apple-macosx -mcpu=apple-m1 -mattr=-zcm-fpr128 | FileCheck %s -check-prefixes=NOZCM-FPR128-ATTR --match-full-lines ; RUN: llc < %s -mtriple=arm64-apple-macosx -mattr=+zcm-fpr128 | FileCheck %s -check-prefixes=ZCM-FPR128-ATTR --match-full-lines -define void @zero_cycle_regmov_FPR64(double %a, double %b, double %c, double %d) { +define void @zero_cycle_regmove_FPR64(double %a, double %b, double %c, double %d) { entry: ; CHECK-LABEL: t: ; NOZCM-FPR128-CPU: fmov d0, d2 @@ -45,7 +45,7 @@ entry: declare float @foo_double(double, double) -define void @zero_cycle_regmov_FPR32(float %a, float %b, float %c, float %d) { +define void @zero_cycle_regmove_FPR32(float %a, float %b, float %c, float %d) { entry: ; CHECK-LABEL: t: ; NOZCM-FPR128-CPU: fmov s0, s2 @@ -86,7 +86,7 @@ entry: declare float @foo_float(float, float) -define void @zero_cycle_regmov_FPR16(half %a, half %b, half %c, half %d) { +define void @zero_cycle_regmove_FPR16(half %a, half %b, half %c, half %d) { entry: ; CHECK-LABEL: t: ; NOZCM-FPR128-CPU: fmov s0, s2 diff --git a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov-gpr.ll b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmove-gpr.ll index e14e69b..d6d3f15 100644 --- a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov-gpr.ll +++ b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmove-gpr.ll @@ -4,7 +4,7 @@ ; RUN: llc < %s -mtriple=arm64-apple-macosx -mcpu=apple-m1 -mattr=-zcm-gpr64 | FileCheck %s -check-prefixes=NOTATTR --match-full-lines ; RUN: llc < %s -mtriple=arm64-apple-macosx -mattr=+zcm-gpr64 | FileCheck %s -check-prefixes=ATTR --match-full-lines -define void @zero_cycle_regmov_GPR32(i32 %a, i32 %b, i32 %c, i32 %d) { +define void @zero_cycle_regmove_GPR32(i32 %a, i32 %b, i32 %c, i32 %d) { entry: ; CHECK-LABEL: t: ; NOTCPU-LINUX: mov w0, w2 diff --git a/llvm/test/CodeGen/AArch64/combine-sdiv.ll b/llvm/test/CodeGen/AArch64/combine-sdiv.ll index dc88f94..cca190f 100644 --- a/llvm/test/CodeGen/AArch64/combine-sdiv.ll +++ b/llvm/test/CodeGen/AArch64/combine-sdiv.ll @@ -1774,3 +1774,88 @@ define i128 @combine_i128_sdiv_const100(i128 %x) { %1 = sdiv i128 %x, 100 ret i128 %1 } + +; The following only becomes an sdiv_by_one after type legalisation, after which +; the splatted scalar constant has a different type to the splat vector. This +; test verifies DAGCombiner does not care about this type difference. 
+define <16 x i16> @combine_vec_sdiv_by_one_obfuscated(<16 x i16> %x) "target-features"="+sve" { +; CHECK-SD-LABEL: combine_vec_sdiv_by_one_obfuscated: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: combine_vec_sdiv_by_one_obfuscated: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi v2.2d, #0000000000000000 +; CHECK-GI-NEXT: movi v3.8h, #1 +; CHECK-GI-NEXT: smov w8, v0.h[0] +; CHECK-GI-NEXT: mov v3.h[0], v2.h[0] +; CHECK-GI-NEXT: smov w9, v3.h[0] +; CHECK-GI-NEXT: smov w16, v3.h[7] +; CHECK-GI-NEXT: sdiv w14, w8, w9 +; CHECK-GI-NEXT: smov w8, v0.h[1] +; CHECK-GI-NEXT: smov w9, v3.h[1] +; CHECK-GI-NEXT: sdiv w15, w8, w9 +; CHECK-GI-NEXT: smov w8, v0.h[2] +; CHECK-GI-NEXT: smov w9, v3.h[2] +; CHECK-GI-NEXT: sdiv w13, w8, w9 +; CHECK-GI-NEXT: smov w8, v0.h[3] +; CHECK-GI-NEXT: smov w9, v3.h[3] +; CHECK-GI-NEXT: sdiv w12, w8, w9 +; CHECK-GI-NEXT: smov w8, v0.h[4] +; CHECK-GI-NEXT: smov w9, v3.h[4] +; CHECK-GI-NEXT: sdiv w11, w8, w9 +; CHECK-GI-NEXT: smov w8, v0.h[5] +; CHECK-GI-NEXT: smov w9, v3.h[5] +; CHECK-GI-NEXT: sdiv w10, w8, w9 +; CHECK-GI-NEXT: smov w8, v0.h[6] +; CHECK-GI-NEXT: smov w9, v3.h[6] +; CHECK-GI-NEXT: movi v3.8h, #1 +; CHECK-GI-NEXT: smov w17, v3.h[0] +; CHECK-GI-NEXT: smov w18, v3.h[1] +; CHECK-GI-NEXT: smov w0, v3.h[2] +; CHECK-GI-NEXT: smov w1, v3.h[3] +; CHECK-GI-NEXT: smov w2, v3.h[4] +; CHECK-GI-NEXT: smov w3, v3.h[5] +; CHECK-GI-NEXT: sdiv w8, w8, w9 +; CHECK-GI-NEXT: smov w9, v0.h[7] +; CHECK-GI-NEXT: fmov s0, w14 +; CHECK-GI-NEXT: mov v0.h[1], w15 +; CHECK-GI-NEXT: smov w15, v1.h[6] +; CHECK-GI-NEXT: mov v0.h[2], w13 +; CHECK-GI-NEXT: sdiv w9, w9, w16 +; CHECK-GI-NEXT: smov w16, v1.h[0] +; CHECK-GI-NEXT: mov v0.h[3], w12 +; CHECK-GI-NEXT: smov w12, v1.h[7] +; CHECK-GI-NEXT: mov v0.h[4], w11 +; CHECK-GI-NEXT: sdiv w16, w16, w17 +; CHECK-GI-NEXT: smov w17, v1.h[1] +; CHECK-GI-NEXT: mov v0.h[5], w10 +; CHECK-GI-NEXT: mov v0.h[6], w8 +; CHECK-GI-NEXT: sdiv w17, w17, w18 +; CHECK-GI-NEXT: smov w18, v1.h[2] +; CHECK-GI-NEXT: fmov s2, w16 +; CHECK-GI-NEXT: smov w16, v3.h[6] +; CHECK-GI-NEXT: mov v0.h[7], w9 +; CHECK-GI-NEXT: sdiv w18, w18, w0 +; CHECK-GI-NEXT: smov w0, v1.h[3] +; CHECK-GI-NEXT: mov v2.h[1], w17 +; CHECK-GI-NEXT: sdiv w0, w0, w1 +; CHECK-GI-NEXT: smov w1, v1.h[4] +; CHECK-GI-NEXT: mov v2.h[2], w18 +; CHECK-GI-NEXT: sdiv w1, w1, w2 +; CHECK-GI-NEXT: smov w2, v1.h[5] +; CHECK-GI-NEXT: mov v2.h[3], w0 +; CHECK-GI-NEXT: sdiv w14, w2, w3 +; CHECK-GI-NEXT: mov v2.h[4], w1 +; CHECK-GI-NEXT: sdiv w13, w15, w16 +; CHECK-GI-NEXT: smov w15, v3.h[7] +; CHECK-GI-NEXT: mov v2.h[5], w14 +; CHECK-GI-NEXT: sdiv w10, w12, w15 +; CHECK-GI-NEXT: mov v2.h[6], w13 +; CHECK-GI-NEXT: mov v2.h[7], w10 +; CHECK-GI-NEXT: mov v1.16b, v2.16b +; CHECK-GI-NEXT: ret + %zero_and_ones = shufflevector <16 x i16> zeroinitializer, <16 x i16> splat (i16 1), <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + %div = sdiv <16 x i16> %x, %zero_and_ones + ret <16 x i16> %div +} diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-win.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-win.mir index 5933c5d..b8302e6 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve-win.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve-win.mir @@ -380,10 +380,8 @@ body: | ; CHECK-NEXT: frame-destroy SEH_EpilogStart ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 32, 0 ; CHECK-NEXT: frame-destroy SEH_StackAlloc 32 - ; CHECK-NEXT: $lr = frame-destroy LDRXui $sp, 0 :: (load (s64) from %stack.1) - ; CHECK-NEXT: 
frame-destroy SEH_SaveReg 30, 0 - ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0 - ; CHECK-NEXT: frame-destroy SEH_StackAlloc 16 + ; CHECK-NEXT: early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1) + ; CHECK-NEXT: frame-destroy SEH_SaveReg_X 30, -16 ; CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 0 :: (load (s16) from %stack.4) ; CHECK-NEXT: frame-destroy SEH_SavePReg 4, 0 ; CHECK-NEXT: $p5 = frame-destroy LDR_PXI $sp, 1 :: (load (s16) from %stack.3) @@ -430,10 +428,8 @@ body: | ; CHECK-NEXT: frame-destroy SEH_EpilogStart ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 32, 0 ; CHECK-NEXT: frame-destroy SEH_StackAlloc 32 - ; CHECK-NEXT: $lr = frame-destroy LDRXui $sp, 0 :: (load (s64) from %stack.1) - ; CHECK-NEXT: frame-destroy SEH_SaveReg 30, 0 - ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0 - ; CHECK-NEXT: frame-destroy SEH_StackAlloc 16 + ; CHECK-NEXT: early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1) + ; CHECK-NEXT: frame-destroy SEH_SaveReg_X 30, -16 ; CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.4) ; CHECK-NEXT: frame-destroy SEH_SaveZReg 8, 0 ; CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.3) @@ -557,10 +553,8 @@ body: | ; CHECK-NEXT: frame-destroy SEH_StackAlloc 32 ; CHECK-NEXT: $x21, $lr = frame-destroy LDPXi $sp, 2 :: (load (s64) from %stack.2), (load (s64) from %stack.3) ; CHECK-NEXT: frame-destroy SEH_SaveRegP 21, 30, 16 - ; CHECK-NEXT: $x19, $x20 = frame-destroy LDPXi $sp, 0 :: (load (s64) from %stack.4), (load (s64) from %stack.5) - ; CHECK-NEXT: frame-destroy SEH_SaveRegP 19, 20, 0 - ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 32, 0 - ; CHECK-NEXT: frame-destroy SEH_StackAlloc 32 + ; CHECK-NEXT: early-clobber $sp, $x19, $x20 = frame-destroy LDPXpost $sp, 4 :: (load (s64) from %stack.4), (load (s64) from %stack.5) + ; CHECK-NEXT: frame-destroy SEH_SaveRegP_X 19, 20, -32 ; CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.21) ; CHECK-NEXT: frame-destroy SEH_SaveZReg 8, 2 ; CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.20) @@ -745,10 +739,8 @@ body: | ; CHECK-NEXT: frame-destroy SEH_EpilogStart ; CHECK-NEXT: $sp = frame-destroy ADDXri $fp, 0, 0 ; CHECK-NEXT: frame-destroy SEH_SetFP - ; CHECK-NEXT: $fp, $lr = frame-destroy LDPXi $sp, 0 :: (load (s64) from %stack.2), (load (s64) from %stack.3) - ; CHECK-NEXT: frame-destroy SEH_SaveFPLR 0 - ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0 - ; CHECK-NEXT: frame-destroy SEH_StackAlloc 16 + ; CHECK-NEXT: early-clobber $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.2), (load (s64) from %stack.3) + ; CHECK-NEXT: frame-destroy SEH_SaveFPLR_X -16 ; CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.19) ; CHECK-NEXT: frame-destroy SEH_SaveZReg 8, 2 ; CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.18) @@ -869,10 +861,8 @@ body: | ; CHECK-NEXT: frame-destroy SEH_EpilogStart ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 7, implicit $vg ; CHECK-NEXT: frame-destroy SEH_AllocZ 7 - ; CHECK-NEXT: $lr = frame-destroy LDRXui $sp, 0 :: (load (s64) from %stack.6) - ; CHECK-NEXT: frame-destroy SEH_SaveReg 30, 0 - ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0 - ; CHECK-NEXT: frame-destroy SEH_StackAlloc 16 + ; CHECK-NEXT: early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.6) + ; CHECK-NEXT: frame-destroy SEH_SaveReg_X 30, -16 ; CHECK-NEXT: $z8 
= frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.8) ; CHECK-NEXT: frame-destroy SEH_SaveZReg 8, 1 ; CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.7) diff --git a/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir b/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir new file mode 100644 index 0000000..3f174a6 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/machine-sme-abi-find-insert-pt.mir @@ -0,0 +1,227 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=aarch64 -mattr=+sve -mattr=+sme -run-pass=aarch64-machine-sme-abi -verify-machineinstrs %s -o - | FileCheck %s + +--- | + ; Test moving a state change to be before a $nzcv def + define void @move_before_nzcv_def() "aarch64_inout_za" { ret void } + + ; Test moving a state change to a point where $x0 is live + define void @move_to_x0_live() "aarch64_inout_za" { ret void } + + ; Test we don't move before a previous state change. + define void @do_not_move_before_prior_state_change() "aarch64_za_state_agnostic" { ret void } + + ; Test we don't move into a call sequence. + define void @do_not_move_into_call() "aarch64_inout_za" { ret void } + + declare void @clobber() + declare void @inout_call() "aarch64_inout_za" +... +--- +name: move_before_nzcv_def +tracksRegLiveness: true +isSSA: true +noVRegs: false +body: | + bb.0: + + ; CHECK-LABEL: name: move_before_nzcv_def + ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp + ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]] + ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]] + ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0 + ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]] + ; CHECK-NEXT: MSR 56965, [[COPY1]] + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: RequiresZASavePseudo + ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv + ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv + ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: RestoreZAPseudo [[MRS]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0 + ; CHECK-NEXT: MSR 56965, $xzr + ; CHECK-NEXT: $nzcv = IMPLICIT_DEF + ; CHECK-NEXT: $zab0 = IMPLICIT_DEF + ; CHECK-NEXT: FAKE_USE $nzcv + ; CHECK-NEXT: RET_ReallyLR + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + RequiresZASavePseudo + BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + + $nzcv = IMPLICIT_DEF + $zab0 = IMPLICIT_DEF + FAKE_USE $nzcv + + RET_ReallyLR +... 
+--- +name: move_to_x0_live +tracksRegLiveness: true +isSSA: true +noVRegs: false +body: | + bb.0: + + ; CHECK-LABEL: name: move_to_x0_live + ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp + ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]] + ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]] + ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0 + ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]] + ; CHECK-NEXT: MSR 56965, [[COPY1]] + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: RequiresZASavePseudo + ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: $x0 = IMPLICIT_DEF + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv + ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv + ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: RestoreZAPseudo [[MRS]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0 + ; CHECK-NEXT: MSR 56965, $xzr + ; CHECK-NEXT: $x0 = COPY [[COPY2]] + ; CHECK-NEXT: $nzcv = IMPLICIT_DEF + ; CHECK-NEXT: FAKE_USE $x0 + ; CHECK-NEXT: $zab0 = IMPLICIT_DEF + ; CHECK-NEXT: FAKE_USE $nzcv + ; CHECK-NEXT: RET_ReallyLR + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + RequiresZASavePseudo + BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + + $x0 = IMPLICIT_DEF + + $nzcv = IMPLICIT_DEF + FAKE_USE $x0 + + $zab0 = IMPLICIT_DEF + FAKE_USE $nzcv + + RET_ReallyLR +... 
+--- +name: do_not_move_before_prior_state_change +tracksRegLiveness: true +isSSA: true +noVRegs: false +body: | + ; CHECK-LABEL: name: do_not_move_before_prior_state_change + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: BL &__arm_sme_state_size, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit-def $x0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK-NEXT: $sp = SUBXrx64 $sp, [[COPY]], 24 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $sp + ; CHECK-NEXT: $nzcv = IMPLICIT_DEF + ; CHECK-NEXT: $zab0 = IMPLICIT_DEF + ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv + ; CHECK-NEXT: $x0 = COPY [[COPY1]] + ; CHECK-NEXT: BL &__arm_sme_save, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0 + ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv + ; CHECK-NEXT: Bcc 2, %bb.1, implicit $nzcv + ; CHECK-NEXT: B %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: liveins: $nzcv + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: FAKE_USE $nzcv + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: RequiresZASavePseudo + ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: $x0 = COPY [[COPY1]] + ; CHECK-NEXT: BL &__arm_sme_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0 + ; CHECK-NEXT: RET_ReallyLR + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: RequiresZASavePseudo + ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: $x0 = COPY [[COPY1]] + ; CHECK-NEXT: BL &__arm_sme_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x1, implicit-def $lr, implicit $sp, implicit $x0 + ; CHECK-NEXT: RET_ReallyLR + bb.0: + successors: %bb.1, %bb.2 + + ; The insertion point can't move before the $nzcv def (as that would require + ; moving before a $zab0 def -- that requires the ACTIVE state). + $nzcv = IMPLICIT_DEF + $zab0 = IMPLICIT_DEF + Bcc 2, %bb.1, implicit $nzcv + B %bb.2 + ; bb.1 and bb.2 both require ZA saved on entry (to force bb.0's exit bundle to + ; pick the LOCAL_SAVED state). bb.1: + liveins: $nzcv + FAKE_USE $nzcv + + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + RequiresZASavePseudo + BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + + RET_ReallyLR + bb.2: + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + RequiresZASavePseudo + BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + + RET_ReallyLR +...
+--- +name: do_not_move_into_call +tracksRegLiveness: true +isSSA: true +noVRegs: false +body: | + bb.0: + + ; CHECK-LABEL: name: do_not_move_into_call + ; CHECK: [[RDSVLI_XI:%[0-9]+]]:gpr64 = RDSVLI_XI 1, implicit $vg + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $sp + ; CHECK-NEXT: [[MSUBXrrr:%[0-9]+]]:gpr64 = MSUBXrrr [[RDSVLI_XI]], [[RDSVLI_XI]], [[COPY]] + ; CHECK-NEXT: $sp = COPY [[MSUBXrrr]] + ; CHECK-NEXT: STPXi [[MSUBXrrr]], [[RDSVLI_XI]], %stack.0, 0 + ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[ADDXri]] + ; CHECK-NEXT: MSR 56965, [[COPY1]] + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: RequiresZASavePseudo + ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + ; CHECK-NEXT: $nzcv = IMPLICIT_DEF + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv + ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv + ; CHECK-NEXT: [[MRS1:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv + ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: RestoreZAPseudo [[MRS1]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0 + ; CHECK-NEXT: MSR 56965, $xzr + ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv + ; CHECK-NEXT: $zab0 = IMPLICIT_DEF + ; CHECK-NEXT: FAKE_USE $nzcv + ; CHECK-NEXT: RET_ReallyLR + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + RequiresZASavePseudo + BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp + + ; This is an artificial test where NZCV is def'd inside a call, so we can't + ; move the insert point before its definition. + $nzcv = IMPLICIT_DEF + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + + $zab0 = IMPLICIT_DEF + FAKE_USE $nzcv + + RET_ReallyLR +...
diff --git a/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll b/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll new file mode 100644 index 0000000..8f1fe5c --- /dev/null +++ b/llvm/test/CodeGen/AArch64/mir-yaml-has-streaming-mode-changes.ll @@ -0,0 +1,13 @@ +; RUN: llc -mtriple=aarch64 -mattr=+sme -stop-after=aarch64-isel < %s | FileCheck %s + +target triple = "aarch64" + +declare void @foo() "aarch64_pstate_sm_enabled" + +define dso_local void @bar() local_unnamed_addr { +; CHECK-LABEL: name: bar +; CHECK: hasStreamingModeChanges: true +entry: + tail call void @foo() "aarch64_pstate_sm_enabled" + ret void +} diff --git a/llvm/test/CodeGen/AArch64/sat-add.ll b/llvm/test/CodeGen/AArch64/sat-add.ll index ecd48d6..149b4c4 100644 --- a/llvm/test/CodeGen/AArch64/sat-add.ll +++ b/llvm/test/CodeGen/AArch64/sat-add.ll @@ -290,8 +290,7 @@ define i32 @unsigned_sat_variable_i32_using_cmp_sum(i32 %x, i32 %y) { define i32 @unsigned_sat_variable_i32_using_cmp_notval(i32 %x, i32 %y) { ; CHECK-LABEL: unsigned_sat_variable_i32_using_cmp_notval: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, w1 -; CHECK-NEXT: cmn w1, w0 +; CHECK-NEXT: adds w8, w1, w0 ; CHECK-NEXT: csinv w0, w8, wzr, lo ; CHECK-NEXT: ret %noty = xor i32 %y, -1 @@ -331,8 +330,7 @@ define i64 @unsigned_sat_variable_i64_using_cmp_sum(i64 %x, i64 %y) { define i64 @unsigned_sat_variable_i64_using_cmp_notval(i64 %x, i64 %y) { ; CHECK-LABEL: unsigned_sat_variable_i64_using_cmp_notval: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, x1 -; CHECK-NEXT: cmn x1, x0 +; CHECK-NEXT: adds x8, x1, x0 ; CHECK-NEXT: csinv x0, x8, xzr, lo ; CHECK-NEXT: ret %noty = xor i64 %y, -1 diff --git a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll index e3007a3..e4f9efa 100644 --- a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll +++ b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll @@ -391,11 +391,9 @@ define void @agnostic_za_buffer_alloc_with_stack_probes() nounwind "aarch64_za_s ; CHECK-NEWLOWERING-NEXT: sub x19, x8, x0 ; CHECK-NEWLOWERING-NEXT: .LBB7_1: // =>This Inner Loop Header: Depth=1 ; CHECK-NEWLOWERING-NEXT: sub sp, sp, #16, lsl #12 // =65536 -; CHECK-NEWLOWERING-NEXT: cmp sp, x19 ; CHECK-NEWLOWERING-NEXT: mov x0, x19 -; CHECK-NEWLOWERING-NEXT: mrs x8, NZCV ; CHECK-NEWLOWERING-NEXT: bl __arm_sme_save -; CHECK-NEWLOWERING-NEXT: msr NZCV, x8 +; CHECK-NEWLOWERING-NEXT: cmp sp, x19 ; CHECK-NEWLOWERING-NEXT: b.le .LBB7_3 ; CHECK-NEWLOWERING-NEXT: // %bb.2: // in Loop: Header=BB7_1 Depth=1 ; CHECK-NEWLOWERING-NEXT: mov x0, x19 diff --git a/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir b/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir index 18764d5..9f33c06 100644 --- a/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir +++ b/llvm/test/CodeGen/AArch64/sme-lazy-sve-nzcv-live.mir @@ -62,14 +62,12 @@ body: | ; CHECK-NEXT: RequiresZASavePseudo ; CHECK-NEXT: BL @clobber, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp - ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 101, 0, implicit-def $nzcv - ; CHECK-NEXT: [[MRS:%[0-9]+]]:gpr64 = MRS 55824, implicit-def $nzcv, implicit $nzcv ; CHECK-NEXT: MSRpstatesvcrImm1 2, 1, implicit-def $nzcv ; CHECK-NEXT: [[MRS1:%[0-9]+]]:gpr64 = MRS 56965, implicit-def $nzcv ; CHECK-NEXT: $x0 = ADDXri %stack.0, 0, 0 ; CHECK-NEXT: RestoreZAPseudo [[MRS1]], $x0, &__arm_tpidr2_restore, csr_aarch64_sme_abi_support_routines_preservemost_from_x0 ; CHECK-NEXT: 
MSR 56965, $xzr - ; CHECK-NEXT: MSR 55824, [[MRS]], implicit-def $nzcv + ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 101, 0, implicit-def $nzcv ; CHECK-NEXT: Bcc 11, %bb.2, implicit $nzcv ; CHECK-NEXT: B %bb.1 ; CHECK-NEXT: {{ $}} @@ -116,16 +114,14 @@ body: | # CHECK-ASM-LABEL: cmp_branch # CHECK-ASM: msr TPIDR2_EL0, x10 # CHECK-ASM-NEXT: bl clobber -# CHECK-ASM-NEXT: cmp w20, #101 -# CHECK-ASM-NEXT: mrs x8, NZCV # CHECK-ASM-NEXT: smstart za -# CHECK-ASM-NEXT: mrs x9, TPIDR2_EL0 +# CHECK-ASM-NEXT: mrs x8, TPIDR2_EL0 # CHECK-ASM-NEXT: sub x0, x29, #16 -# CHECK-ASM-NEXT: cbnz x9, .LBB0_2 +# CHECK-ASM-NEXT: cbnz x8, .LBB0_2 # CHECK-ASM: bl __arm_tpidr2_restore # CHECK-ASM-NEXT: .LBB0_2: +# CHECK-ASM-NEXT: cmp w20, #101 # CHECK-ASM-NEXT: msr TPIDR2_EL0, xzr -# CHECK-ASM-NEXT: msr NZCV, x8 # CHECK-ASM-NEXT: b.lt .LBB0_4 # CHECK-ASM: bl inout_call # CHECK-ASM-NEXT: .LBB0_4: diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll index b6dee97e..b8d6c88 100644 --- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll +++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll @@ -732,6 +732,247 @@ exit: ret void } +; This example corresponds to: +; +; __arm_agnostic("sme_za_state") void try_catch_agnostic_za_invoke() +; { +; try { +; agnostic_za_call(); +; } catch(...) { +; } +; } +; +; In this example we preserve all SME state enabled by PSTATE.ZA using +; `__arm_sme_save` before agnostic_za_call(). This is because on all normal +; returns from an agnostic ZA function ZA state should be preserved. That means +; we need to make sure ZA state is saved in case agnostic_za_call() throws, and +; we need to restore ZA state after unwinding to the catch block. + +define void @try_catch_agnostic_za_invoke() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_agnostic_za_invoke: +; CHECK: .Lfunc_begin5: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception5 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: bl __arm_sme_state_size +; CHECK-NEXT: sub sp, sp, x0 +; CHECK-NEXT: mov x19, sp +; CHECK-NEXT: .Ltmp15: // EH_LABEL +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_save +; CHECK-NEXT: bl agnostic_za_call +; CHECK-NEXT: .Ltmp16: // EH_LABEL +; CHECK-NEXT: .LBB5_1: // %exit +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_restore +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB5_2: // %catch +; CHECK-NEXT: .Ltmp17: // EH_LABEL +; CHECK-NEXT: bl __cxa_begin_catch +; CHECK-NEXT: bl __cxa_end_catch +; CHECK-NEXT: b .LBB5_1 +; +; CHECK-SDAG-LABEL: try_catch_agnostic_za_invoke: +; CHECK-SDAG: .Lfunc_begin5: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception5 +; CHECK-SDAG-NEXT: // %bb.0: // %entry +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: bl __arm_sme_state_size +; CHECK-SDAG-NEXT: sub sp, sp, x0 +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: .Ltmp15: // EH_LABEL +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl agnostic_za_call +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: .Ltmp16: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB5_1: // %exit +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB5_2: // %catch +; CHECK-SDAG-NEXT: .Ltmp17: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: b .LBB5_1 +entry: + invoke void @agnostic_za_call() + to label %exit unwind label %catch + +catch: + %eh_info = landingpad { ptr, i32 } + catch ptr null + %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0 + tail call ptr @__cxa_begin_catch(ptr %exception_ptr) + tail call void @__cxa_end_catch() + br label %exit + +exit: + ret void +} + +; This is the same as `try_catch_agnostic_za_invoke`, but shows that a lazy save would +; also need to be committed in a shared-ZA function calling an agnostic-ZA function. +define void @try_catch_inout_za_agnostic_za_callee() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_inout_za_agnostic_za_callee: +; CHECK: .Lfunc_begin6: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception6 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-16]!
// 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdsvl x8, #1 +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: msub x9, x8, x8, x9 +; CHECK-NEXT: mov sp, x9 +; CHECK-NEXT: stp x9, x8, [x29, #-16] +; CHECK-NEXT: .Ltmp18: // EH_LABEL +; CHECK-NEXT: sub x8, x29, #16 +; CHECK-NEXT: msr TPIDR2_EL0, x8 +; CHECK-NEXT: bl agnostic_za_call +; CHECK-NEXT: .Ltmp19: // EH_LABEL +; CHECK-NEXT: .LBB6_1: // %exit +; CHECK-NEXT: smstart za +; CHECK-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-NEXT: sub x0, x29, #16 +; CHECK-NEXT: cbnz x8, .LBB6_3 +; CHECK-NEXT: // %bb.2: // %exit +; CHECK-NEXT: bl __arm_tpidr2_restore +; CHECK-NEXT: .LBB6_3: // %exit +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB6_4: // %catch +; CHECK-NEXT: .Ltmp20: // EH_LABEL +; CHECK-NEXT: bl __cxa_begin_catch +; CHECK-NEXT: bl __cxa_end_catch +; CHECK-NEXT: b .LBB6_1 +; +; CHECK-SDAG-LABEL: try_catch_inout_za_agnostic_za_callee: +; CHECK-SDAG: .Lfunc_begin6: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception6 +; CHECK-SDAG-NEXT: // %bb.0: // %entry +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: .Ltmp18: // EH_LABEL +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl agnostic_za_call +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB6_2 +; CHECK-SDAG-NEXT: // %bb.1: // %entry +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB6_2: // %entry +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp19: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB6_3: // %exit +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB6_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp20: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB6_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB6_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB6_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB6_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl 
__cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB6_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB6_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB6_3 +entry: + invoke void @agnostic_za_call() + to label %exit unwind label %catch + +catch: + %eh_info = landingpad { ptr, i32 } + catch ptr null + %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0 + tail call ptr @__cxa_begin_catch(ptr %exception_ptr) + tail call void @__cxa_end_catch() + br label %exit + +exit: + ret void +} + declare ptr @__cxa_allocate_exception(i64) declare void @__cxa_throw(ptr, ptr, ptr) declare ptr @__cxa_begin_catch(ptr) @@ -742,3 +983,4 @@ declare void @may_throw() declare void @shared_za_call() "aarch64_inout_za" declare void @noexcept_shared_za_call() "aarch64_inout_za" declare void @shared_zt0_call() "aarch64_inout_zt0" +declare void @agnostic_za_call() "aarch64_za_state_agnostic" diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce.ll index 15ee6a0..36655f6 100644 --- a/llvm/test/CodeGen/AArch64/sve-fp-reduce.ll +++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce.ll @@ -359,12 +359,177 @@ define float @fadd_reduct_reassoc_v4v8f32(<vscale x 4 x float> %a, <vscale x 8 x ret float %r } +; No FMULV instruction so use knowledge about the architectural maximum size of +; an SVE register to "scalarise" the reduction. + +define half @fmulv_nxv2f16(half %init, <vscale x 2 x half> %a) { +; CHECK-LABEL: fmulv_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov z2.h, #1.00000000 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: fmul h0, h0, h1 +; CHECK-NEXT: ret + %res = call fast half @llvm.vector.reduce.fmul.nxv2f16(half %init, <vscale x 2 x half> %a) + ret half %res +} + +define half @fmulv_nxv4f16(half %init, <vscale x 4 x half> %a) { +; CHECK-LABEL: fmulv_nxv4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov z2.h, #1.00000000 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: fmul h0, h0, h1 +; CHECK-NEXT: ret + %res = call fast half @llvm.vector.reduce.fmul.nxv4f16(half %init, <vscale x 4 x half> %a) + ret half %res +} + +define half 
@fmulv_nxv8f16(half %init, <vscale x 8 x half> %a) { +; CHECK-LABEL: fmulv_nxv8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov z2.h, #1.00000000 +; CHECK-NEXT: uzp2 z3.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: fmul z1.h, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: fmul z1.h, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: fmul z1.h, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: fmul z1.h, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: fmul z1.h, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: fmul z1.h, z1.h, z3.h +; CHECK-NEXT: uzp2 z3.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: fmul z1.h, z1.h, z3.h +; CHECK-NEXT: fmul h0, h0, h1 +; CHECK-NEXT: ret + %res = call fast half @llvm.vector.reduce.fmul.nxv8f16(half %init, <vscale x 8 x half> %a) + ret half %res +} + +define float @fmulv_nxv2f32(float %init, <vscale x 2 x float> %a) { +; CHECK-LABEL: fmulv_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov z2.s, #1.00000000 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: fmul s0, s0, s1 +; CHECK-NEXT: ret + %res = call fast float @llvm.vector.reduce.fmul.nxv2f32(float %init, <vscale x 2 x float> %a) + ret float %res +} + +define float @fmulv_nxv4f32(float %init, <vscale x 4 x float> %a) { +; CHECK-LABEL: fmulv_nxv4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov z2.s, #1.00000000 +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.s, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.s, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.s, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.s, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.s, z1.s, z3.s +; CHECK-NEXT: uzp2 z3.s, z1.s, z2.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s +; CHECK-NEXT: fmul z1.s, z1.s, z3.s +; CHECK-NEXT: fmul s0, s0, s1 +; CHECK-NEXT: ret + %res = call fast float @llvm.vector.reduce.fmul.nxv4f32(float %init, <vscale x 4 x float> %a) + ret float %res +} + +define double @fmulv_nxv2f64(double %init, <vscale x 2 x double> %a) { +; CHECK-LABEL: fmulv_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov z2.d, #1.00000000 +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.d, z1.d, z3.d +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.d, z1.d, z3.d +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.d, z1.d, z3.d +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; 
CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.d, z1.d, z3.d +; CHECK-NEXT: uzp2 z3.d, z1.d, z2.d +; CHECK-NEXT: uzp1 z1.d, z1.d, z2.d +; CHECK-NEXT: fmul z1.d, z1.d, z3.d +; CHECK-NEXT: fmul d0, d0, d1 +; CHECK-NEXT: ret + %res = call fast double @llvm.vector.reduce.fmul.nxv2f64(double %init, <vscale x 2 x double> %a) + ret double %res +} + declare half @llvm.vector.reduce.fadd.nxv2f16(half, <vscale x 2 x half>) declare half @llvm.vector.reduce.fadd.nxv4f16(half, <vscale x 4 x half>) declare half @llvm.vector.reduce.fadd.nxv8f16(half, <vscale x 8 x half>) -declare half @llvm.vector.reduce.fadd.nxv6f16(half, <vscale x 6 x half>) -declare half @llvm.vector.reduce.fadd.nxv10f16(half, <vscale x 10 x half>) -declare half @llvm.vector.reduce.fadd.nxv12f16(half, <vscale x 12 x half>) declare float @llvm.vector.reduce.fadd.nxv2f32(float, <vscale x 2 x float>) declare float @llvm.vector.reduce.fadd.nxv4f32(float, <vscale x 4 x float>) declare float @llvm.vector.reduce.fadd.nxv8f32(float, <vscale x 8 x float>) @@ -397,3 +562,10 @@ declare half @llvm.vector.reduce.fminimum.nxv8f16(<vscale x 8 x half>) declare float @llvm.vector.reduce.fminimum.nxv2f32(<vscale x 2 x float>) declare float @llvm.vector.reduce.fminimum.nxv4f32(<vscale x 4 x float>) declare double @llvm.vector.reduce.fminimum.nxv2f64(<vscale x 2 x double>) + +declare half @llvm.vector.reduce.fmul.nxv2f16(half, <vscale x 2 x half>) +declare half @llvm.vector.reduce.fmul.nxv4f16(half, <vscale x 4 x half>) +declare half @llvm.vector.reduce.fmul.nxv8f16(half, <vscale x 8 x half>) +declare float @llvm.vector.reduce.fmul.nxv2f32(float, <vscale x 2 x float>) +declare float @llvm.vector.reduce.fmul.nxv4f32(float, <vscale x 4 x float>) +declare double @llvm.vector.reduce.fmul.nxv2f64(double, <vscale x 2 x double>) diff --git a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll index be936f0..6fb0315 100644 --- a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll +++ b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll @@ -369,6 +369,131 @@ define i64 @smax_nxv2i64(<vscale x 2 x i64> %a) { ret i64 %res } +; No MULV instruction so use knowledge about the architectural maximum size of +; an SVE register to "scalarise" the reduction. 
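As with the FMULV case in sve-fp-reduce.ll above, the lowering here pairs lanes with uzp1/uzp2 (even/odd deinterleave against a splat of the multiplicative identity #1), multiplies the two halves, and repeats until a single lane holds the product; the round count comes from the 2048-bit architectural maximum SVE register size, and the identity padding makes the extra rounds harmless at smaller runtime vector lengths. A minimal C model of that scheme, not LLVM's implementation (MAX_ELEMS and mulv_i8 are illustrative names):

#include <stdint.h>
#include <stdio.h>

#define MAX_ELEMS 256 /* 2048-bit architectural max / 8-bit elements */

/* Models the uzp1/uzp2 + MUL rounds the CHECK lines below expect:
   lanes past the real vector length are padded with 1, so each
   round halves the problem regardless of the runtime VL. */
static uint8_t mulv_i8(const uint8_t *v, int n) {
    uint8_t lanes[MAX_ELEMS];
    for (int i = 0; i < MAX_ELEMS; ++i)
        lanes[i] = i < n ? v[i] : 1; /* identity padding, like mov z1.b, #1 */
    for (int width = MAX_ELEMS; width > 1; width /= 2)
        for (int i = 0; i < width / 2; ++i)  /* uzp1 lane * uzp2 lane */
            lanes[i] = (uint8_t)(lanes[2 * i] * lanes[2 * i + 1]);
    return lanes[0];
}

int main(void) {
    uint8_t v[4] = {2, 3, 5, 7};
    printf("%u\n", mulv_i8(v, 4)); /* prints 210 */
    return 0;
}

For nxv16i8 this gives log2(2048/8) = 8 uzp/mul rounds, matching the eight mul z0.b steps checked below; nxv2i64 needs only log2(2048/64) = 5.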
+ +define i8 @mulv_nxv16i8(<vscale x 16 x i8> %a) { +; CHECK-LABEL: mulv_nxv16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.b, #1 // =0x1 +; CHECK-NEXT: ptrue p0.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.mul.nxv16i8(<vscale x 16 x i8> %a) + ret i8 %res +} + +define i16 @mulv_nxv8i16(<vscale x 8 x i16> %a) { +; CHECK-LABEL: mulv_nxv8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.h, #1 // =0x1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.mul.nxv8i16(<vscale x 8 x i16> %a) + ret i16 %res +} + +define i32 @mulv_nxv4i32(<vscale x 4 x i32> %a) { +; CHECK-LABEL: mulv_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.s, #1 // =0x1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: uzp2 z2.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: uzp2 z2.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: uzp2 z2.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: uzp2 z2.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: uzp2 z2.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: uzp2 z2.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.mul.nxv4i32(<vscale x 4 x i32> %a) + ret i32 %res +} + +define i64 @mulv_nxv2i64(<vscale x 2 x i64> %a) { +; CHECK-LABEL: mulv_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.d, #1 // =0x1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: uzp2 z2.d, z0.d, z1.d +; 
CHECK-NEXT: uzp1 z0.d, z0.d, z1.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: uzp2 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp1 z0.d, z0.d, z1.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: uzp2 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp1 z0.d, z0.d, z1.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: uzp2 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp1 z0.d, z0.d, z1.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: uzp2 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp1 z0.d, z0.d, z1.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.mul.nxv2i64(<vscale x 2 x i64> %a) + ret i64 %res +} + ; Test widen vector reduce type declare i8 @llvm.vector.reduce.smin.nxv10i8(<vscale x 10 x i8>) diff --git a/llvm/test/CodeGen/AArch64/win-sve.ll b/llvm/test/CodeGen/AArch64/win-sve.ll index 53ac934..3ba4a1c 100644 --- a/llvm/test/CodeGen/AArch64/win-sve.ll +++ b/llvm/test/CodeGen/AArch64/win-sve.ll @@ -75,10 +75,8 @@ define i32 @f(<vscale x 2 x i64> %x) { ; CHECK-NEXT: .seh_startepilogue ; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload ; CHECK-NEXT: .seh_save_reg x30, 8 -; CHECK-NEXT: ldr x28, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x28, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x28, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x28, 16 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -234,10 +232,8 @@ define void @f2(i64 %n, <vscale x 2 x i64> %x) { ; CHECK-NEXT: .seh_save_fplr 16 ; CHECK-NEXT: ldr x28, [sp, #8] // 8-byte Folded Reload ; CHECK-NEXT: .seh_save_reg x28, 8 -; CHECK-NEXT: ldr x19, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x19, 0 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr x19, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x19, 32 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -384,10 +380,8 @@ define void @f3(i64 %n, <vscale x 2 x i64> %x) { ; CHECK-NEXT: .seh_stackalloc 16 ; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload ; CHECK-NEXT: .seh_save_reg x30, 8 -; CHECK-NEXT: ldr x28, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x28, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x28, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x28, 16 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -538,10 +532,8 @@ define void @f4(i64 %n, <vscale x 2 x i64> %x) { ; CHECK-NEXT: .seh_stackalloc 16 ; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload ; CHECK-NEXT: .seh_save_reg x30, 8 -; CHECK-NEXT: ldr x28, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x28, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x28, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x28, 16 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -702,10 +694,8 @@ define void @f5(i64 %n, <vscale x 2 x i64> %x) { ; CHECK-NEXT: .seh_save_fplr 16 ; CHECK-NEXT: ldr x28, [sp, #8] // 8-byte Folded 
Reload ; CHECK-NEXT: .seh_save_reg x28, 8 -; CHECK-NEXT: ldr x19, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x19, 0 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr x19, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x19, 32 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -860,10 +850,10 @@ define void @f6(<vscale x 2 x i64> %x, [8 x i64] %pad, i64 %n9) personality ptr ; CHECK-NEXT: stur x0, [x8, #16] ; CHECK-NEXT: addvl x8, x29, #18 ; CHECK-NEXT: ldr x1, [x8, #32] -; CHECK-NEXT: .Ltmp0: +; CHECK-NEXT: .Ltmp0: // EH_LABEL ; CHECK-NEXT: add x0, x19, #0 ; CHECK-NEXT: bl g6 -; CHECK-NEXT: .Ltmp1: +; CHECK-NEXT: .Ltmp1: // EH_LABEL ; CHECK-NEXT: // %bb.1: // %invoke.cont ; CHECK-NEXT: .seh_startepilogue ; CHECK-NEXT: add sp, sp, #64 @@ -872,10 +862,8 @@ define void @f6(<vscale x 2 x i64> %x, [8 x i64] %pad, i64 %n9) personality ptr ; CHECK-NEXT: .seh_save_fplr 16 ; CHECK-NEXT: ldr x28, [sp, #8] // 8-byte Folded Reload ; CHECK-NEXT: .seh_save_reg x28, 8 -; CHECK-NEXT: ldr x19, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x19, 0 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr x19, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x19, 32 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -932,8 +920,6 @@ define void @f6(<vscale x 2 x i64> %x, [8 x i64] %pad, i64 %n9) personality ptr ; CHECK-NEXT: .seh_save_preg p14, 10 ; CHECK-NEXT: ldr p15, [sp, #11, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: .seh_save_preg p15, 11 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: .seh_allocz 18 ; CHECK-NEXT: add sp, sp, #16 @@ -1024,10 +1010,8 @@ define void @f6(<vscale x 2 x i64> %x, [8 x i64] %pad, i64 %n9) personality ptr ; CHECK-NEXT: .seh_save_fplr 16 ; CHECK-NEXT: ldr x28, [sp, #8] // 8-byte Folded Reload ; CHECK-NEXT: .seh_save_reg x28, 8 -; CHECK-NEXT: ldr x19, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x19, 0 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr x19, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x19, 32 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1144,10 +1128,8 @@ define void @f8(<vscale x 2 x i64> %v) { ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: .seh_startepilogue -; CHECK-NEXT: ldr x30, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x30, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 0 ; CHECK-NEXT: addvl sp, sp, #1 @@ -1196,14 +1178,10 @@ define void @f9(<vscale x 2 x i64> %v, ...) 
{ ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: .seh_startepilogue -; CHECK-NEXT: ldr x30, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x30, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 0 -; CHECK-NEXT: add sp, sp, #64 -; CHECK-NEXT: .seh_stackalloc 64 ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .seh_allocz 1 ; CHECK-NEXT: add sp, sp, #64 @@ -1301,10 +1279,8 @@ define void @f10(i64 %n, <vscale x 2 x i64> %x) "frame-pointer"="all" { ; CHECK-NEXT: .seh_stackalloc 16 ; CHECK-NEXT: ldp x29, x30, [sp, #8] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_fplr 8 -; CHECK-NEXT: ldr x28, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x28, 0 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr x28, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x28, 32 ; CHECK-NEXT: ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 2 ; CHECK-NEXT: ldr z9, [sp, #3, mul vl] // 16-byte Folded Reload @@ -1390,10 +1366,8 @@ define i32 @f11(double %d, <vscale x 4 x i32> %vs) "aarch64_pstate_sm_compatible ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: str d0, [sp, #8] ; CHECK-NEXT: .seh_startepilogue -; CHECK-NEXT: ldr x30, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x30, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 0 ; CHECK-NEXT: addvl sp, sp, #1 @@ -1431,10 +1405,8 @@ define i32 @f12(double %d, <vscale x 4 x i32> %vs) "aarch64_pstate_sm_compatible ; CHECK-NEXT: .seh_startepilogue ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .seh_allocz 1 -; CHECK-NEXT: ldr x30, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x30, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 0 ; CHECK-NEXT: addvl sp, sp, #1 @@ -1475,10 +1447,8 @@ define i32 @f13(double %d, <vscale x 4 x i32> %vs) "frame-pointer"="all" { ; CHECK-NEXT: .seh_startepilogue ; CHECK-NEXT: ldp x29, x30, [sp, #8] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_fplr 8 -; CHECK-NEXT: ldr x28, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x28, 0 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr x28, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x28, 32 ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 0 ; CHECK-NEXT: addvl sp, sp, #1 @@ -1521,10 +1491,8 @@ define i32 @f14(double %d, <vscale x 4 x i32> %vs) "frame-pointer"="all" { ; CHECK-NEXT: .seh_allocz 1 ; CHECK-NEXT: ldp x29, x30, [sp, #8] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_fplr 8 -; CHECK-NEXT: ldr x28, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x28, 0 -; CHECK-NEXT: add sp, sp, #32 -; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr x28, [sp], #32 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x28, 32 ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 0 ; CHECK-NEXT: addvl sp, sp, #1 @@ -1572,10 +1540,8 @@ define tailcc void 
@f15(double %d, <vscale x 4 x i32> %vs, [9 x i64], i32 %i) { ; CHECK-NEXT: .seh_stackalloc 16 ; CHECK-NEXT: ldr x30, [sp, #8] // 8-byte Folded Reload ; CHECK-NEXT: .seh_save_reg x30, 8 -; CHECK-NEXT: ldr x28, [sp] // 8-byte Folded Reload -; CHECK-NEXT: .seh_save_reg x28, 0 -; CHECK-NEXT: add sp, sp, #16 -; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldr x28, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x28, 16 ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload ; CHECK-NEXT: .seh_save_zreg z8, 0 ; CHECK-NEXT: addvl sp, sp, #1 @@ -1594,3 +1560,53 @@ define tailcc void @f15(double %d, <vscale x 4 x i32> %vs, [9 x i64], i32 %i) { store i32 %i, ptr %a ret void } + +declare ptr @llvm.swift.async.context.addr() + +define void @f16(ptr swiftasync %ctx, <vscale x 2 x i64> %foo) { +; CHECK-LABEL: f16: +; CHECK: .seh_proc f16 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: orr x29, x29, #0x1000000000000000 +; CHECK-NEXT: .seh_nop +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .seh_allocz 1 +; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_zreg z8, 0 +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: stp x29, x30, [sp, #8] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 8 +; CHECK-NEXT: str x22, [sp] +; CHECK-NEXT: .seh_nop +; CHECK-NEXT: add x29, sp, #8 +; CHECK-NEXT: .seh_add_fp 8 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: //APP +; CHECK-NEXT: //NO_APP +; CHECK-NEXT: ldr x8, [x22] +; CHECK-NEXT: stur x8, [x29, #-8] +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: .seh_stackalloc 16 +; CHECK-NEXT: ldp x29, x30, [sp, #8] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 8 +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: .seh_stackalloc 32 +; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_zreg z8, 0 +; CHECK-NEXT: and x29, x29, #0xefffffffffffffff +; CHECK-NEXT: .seh_nop +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: .seh_allocz 1 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc + tail call void asm sideeffect "", "~{z8}"() + %1 = load ptr, ptr %ctx, align 8 + %2 = tail call ptr @llvm.swift.async.context.addr() + store ptr %1, ptr %2, align 8 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/clamp-minmax-const-combine.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/clamp-minmax-const-combine.ll index 26b9d99..8705647 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/clamp-minmax-const-combine.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/clamp-minmax-const-combine.ll @@ -206,7 +206,7 @@ define <2 x half> @test_max_K0min_K1Val_v2f16(<2 x half> %a) #1 { ; global nnan function attribute always forces clamp combine -define float @test_min_max_global_nnan(float %a) #3 { +define float @test_min_max_global_nnan(float %a) { ; GFX10-LABEL: test_min_max_global_nnan: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -223,11 +223,11 @@ define float @test_min_max_global_nnan(float %a) #3 { ; GFX12-NEXT: v_max_num_f32_e64 v0, v0, v0 clamp ; GFX12-NEXT: s_setpc_b64 s[30:31] %maxnum = call float @llvm.maxnum.f32(float %a, float 0.0) - %fmed = call float @llvm.minnum.f32(float %maxnum, float 1.0) + %fmed = call nnan float @llvm.minnum.f32(float %maxnum, float 1.0) ret float %fmed } -define float @test_max_min_global_nnan(float %a) #3 { +define float @test_max_min_global_nnan(float %a) { ; GFX10-LABEL: test_max_min_global_nnan: ; GFX10: ; %bb.0: ; GFX10-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -244,7 +244,7 @@ define float @test_max_min_global_nnan(float %a) #3 { ; GFX12-NEXT: v_max_num_f32_e64 v0, v0, v0 clamp ; GFX12-NEXT: s_setpc_b64 s[30:31] %minnum = call float @llvm.minnum.f32(float %a, float 1.0) - %fmed = call float @llvm.maxnum.f32(float %minnum, float 0.0) + %fmed = call nnan float @llvm.maxnum.f32(float %minnum, float 0.0) ret float %fmed } @@ -414,5 +414,4 @@ declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>) attributes #0 = {"amdgpu-ieee"="true"} attributes #1 = {"amdgpu-ieee"="false"} attributes #2 = {"amdgpu-ieee"="true" "amdgpu-dx10-clamp"="true"} -attributes #3 = {"no-nans-fp-math"="true"} attributes #4 = {"amdgpu-ieee"="true" "amdgpu-dx10-clamp"="false"} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fmed3-min-max-const-combine.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fmed3-min-max-const-combine.ll index d2c93e7..696a87b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fmed3-min-max-const-combine.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fmed3-min-max-const-combine.ll @@ -232,7 +232,7 @@ define half @test_max_K0min_K1Val_f16(half %a) #1 { ; global nnan function attribute always forces fmed3 combine -define float @test_min_max_global_nnan(float %a) #2 { +define float @test_min_max_global_nnan(float %a) { ; GFX10-LABEL: test_min_max_global_nnan: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -254,12 +254,12 @@ define float @test_min_max_global_nnan(float %a) #2 { ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_med3_num_f32 v0, v0, 2.0, 4.0 ; GFX12-NEXT: s_setpc_b64 s[30:31] - %maxnum = call float @llvm.maxnum.f32(float %a, float 2.0) + %maxnum = call nnan float @llvm.maxnum.f32(float %a, float 2.0) %fmed = call float @llvm.minnum.f32(float %maxnum, float 4.0) ret float %fmed } -define float @test_max_min_global_nnan(float %a) #2 { +define float @test_max_min_global_nnan(float %a) { ; GFX10-LABEL: test_max_min_global_nnan: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -281,8 +281,8 @@ define float @test_max_min_global_nnan(float %a) #2 { ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_med3_num_f32 v0, v0, 2.0, 4.0 ; GFX12-NEXT: s_setpc_b64 s[30:31] - %minnum = call float @llvm.minnum.f32(float %a, float 4.0) - %fmed = call float @llvm.maxnum.f32(float %minnum, float 2.0) + %minnum = call nnan float @llvm.minnum.f32(float %a, float 4.0) + %fmed = call nnan float @llvm.maxnum.f32(float %minnum, float 2.0) ret float %fmed } @@ -560,4 +560,3 @@ declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>) declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>) attributes #0 = {"amdgpu-ieee"="true"} attributes #1 = {"amdgpu-ieee"="false"} -attributes #2 = {"no-nans-fp-math"="true"} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll index 549af87..a43bfb5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll @@ -1047,7 +1047,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cvt_f32_f16_e64 v1, |s1| ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v2, v1 ; CI-NEXT: s_cbranch_vccz .LBB9_2 -; CI-NEXT: ; %bb.1: ; %frem.else +; CI-NEXT: ; %bb.1: ; %frem.else20 ; CI-NEXT: s_and_b32 s2, s0, 0x8000 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v2, v1 ; CI-NEXT: v_mov_b32_e32 v0, s2 @@ -1058,7 +1058,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s2, s2, 1 ; CI-NEXT: s_cmp_lg_u32 
s2, 0 ; CI-NEXT: s_cbranch_scc1 .LBB9_8 -; CI-NEXT: ; %bb.3: ; %frem.compute +; CI-NEXT: ; %bb.3: ; %frem.compute19 ; CI-NEXT: v_frexp_mant_f32_e32 v3, v1 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v6, v1 ; CI-NEXT: v_ldexp_f32_e64 v1, v3, 1 @@ -1083,10 +1083,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v2 ; CI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB9_6 -; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; CI-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; CI-NEXT: v_add_i32_e32 v2, vcc, 11, v5 ; CI-NEXT: v_sub_i32_e32 v2, vcc, v2, v6 -; CI-NEXT: .LBB9_5: ; %frem.loop_body +; CI-NEXT: .LBB9_5: ; %frem.loop_body27 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v5, v4 ; CI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -1102,7 +1102,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB9_7 ; CI-NEXT: .LBB9_6: ; CI-NEXT: v_mov_b32_e32 v5, v4 -; CI-NEXT: .LBB9_7: ; %frem.loop_exit +; CI-NEXT: .LBB9_7: ; %frem.loop_exit28 ; CI-NEXT: v_add_i32_e32 v2, vcc, -10, v2 ; CI-NEXT: v_ldexp_f32_e32 v2, v5, v2 ; CI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -1125,7 +1125,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: ; implicit-def: $vgpr1 ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v3, v2 ; CI-NEXT: s_cbranch_vccz .LBB9_10 -; CI-NEXT: ; %bb.9: ; %frem.else20 +; CI-NEXT: ; %bb.9: ; %frem.else ; CI-NEXT: s_and_b32 s4, s2, 0x8000 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v3, v2 ; CI-NEXT: v_mov_b32_e32 v1, s4 @@ -1136,7 +1136,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s4, s4, 1 ; CI-NEXT: s_cmp_lg_u32 s4, 0 ; CI-NEXT: s_cbranch_scc1 .LBB9_16 -; CI-NEXT: ; %bb.11: ; %frem.compute19 +; CI-NEXT: ; %bb.11: ; %frem.compute ; CI-NEXT: v_frexp_mant_f32_e32 v4, v2 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v7, v2 ; CI-NEXT: v_ldexp_f32_e64 v2, v4, 1 @@ -1161,10 +1161,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v3 ; CI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB9_14 -; CI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; CI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; CI-NEXT: v_add_i32_e32 v3, vcc, 11, v6 ; CI-NEXT: v_sub_i32_e32 v3, vcc, v3, v7 -; CI-NEXT: .LBB9_13: ; %frem.loop_body27 +; CI-NEXT: .LBB9_13: ; %frem.loop_body ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v6, v5 ; CI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -1180,7 +1180,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB9_15 ; CI-NEXT: .LBB9_14: ; CI-NEXT: v_mov_b32_e32 v6, v5 -; CI-NEXT: .LBB9_15: ; %frem.loop_exit28 +; CI-NEXT: .LBB9_15: ; %frem.loop_exit ; CI-NEXT: v_add_i32_e32 v3, vcc, -10, v3 ; CI-NEXT: v_ldexp_f32_e32 v3, v6, v3 ; CI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -1237,7 +1237,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cvt_f32_f16_e64 v1, |s1| ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v2, v1 ; VI-NEXT: s_cbranch_vccz .LBB9_2 -; VI-NEXT: ; %bb.1: ; %frem.else +; VI-NEXT: ; %bb.1: ; %frem.else20 ; VI-NEXT: s_and_b32 s2, s0, 0x8000 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v2, v1 ; VI-NEXT: v_mov_b32_e32 v0, s2 @@ -1248,7 +1248,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s2, s2, 1 ; VI-NEXT: s_cmp_lg_u32 s2, 0 ; 
VI-NEXT: s_cbranch_scc1 .LBB9_8 -; VI-NEXT: ; %bb.3: ; %frem.compute +; VI-NEXT: ; %bb.3: ; %frem.compute19 ; VI-NEXT: v_frexp_mant_f32_e32 v3, v1 ; VI-NEXT: v_frexp_exp_i32_f32_e32 v6, v1 ; VI-NEXT: v_ldexp_f32 v1, v3, 1 @@ -1273,10 +1273,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v2 ; VI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB9_6 -; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; VI-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; VI-NEXT: v_add_u32_e32 v2, vcc, 11, v5 ; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v6 -; VI-NEXT: .LBB9_5: ; %frem.loop_body +; VI-NEXT: .LBB9_5: ; %frem.loop_body27 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v5, v4 ; VI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -1292,7 +1292,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB9_7 ; VI-NEXT: .LBB9_6: ; VI-NEXT: v_mov_b32_e32 v5, v4 -; VI-NEXT: .LBB9_7: ; %frem.loop_exit +; VI-NEXT: .LBB9_7: ; %frem.loop_exit28 ; VI-NEXT: v_add_u32_e32 v2, vcc, -10, v2 ; VI-NEXT: v_ldexp_f32 v2, v5, v2 ; VI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -1315,7 +1315,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: ; implicit-def: $vgpr1 ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v3, v2 ; VI-NEXT: s_cbranch_vccz .LBB9_10 -; VI-NEXT: ; %bb.9: ; %frem.else20 +; VI-NEXT: ; %bb.9: ; %frem.else ; VI-NEXT: s_and_b32 s3, s4, 0x8000 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v3, v2 ; VI-NEXT: v_mov_b32_e32 v1, s3 @@ -1326,7 +1326,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s3, s3, 1 ; VI-NEXT: s_cmp_lg_u32 s3, 0 ; VI-NEXT: s_cbranch_scc1 .LBB9_16 -; VI-NEXT: ; %bb.11: ; %frem.compute19 +; VI-NEXT: ; %bb.11: ; %frem.compute ; VI-NEXT: v_frexp_mant_f32_e32 v4, v2 ; VI-NEXT: v_frexp_exp_i32_f32_e32 v7, v2 ; VI-NEXT: v_ldexp_f32 v2, v4, 1 @@ -1351,10 +1351,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v3 ; VI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB9_14 -; VI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; VI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; VI-NEXT: v_add_u32_e32 v3, vcc, 11, v6 ; VI-NEXT: v_sub_u32_e32 v3, vcc, v3, v7 -; VI-NEXT: .LBB9_13: ; %frem.loop_body27 +; VI-NEXT: .LBB9_13: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v6, v5 ; VI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -1370,7 +1370,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB9_15 ; VI-NEXT: .LBB9_14: ; VI-NEXT: v_mov_b32_e32 v6, v5 -; VI-NEXT: .LBB9_15: ; %frem.loop_exit28 +; VI-NEXT: .LBB9_15: ; %frem.loop_exit ; VI-NEXT: v_add_u32_e32 v3, vcc, -10, v3 ; VI-NEXT: v_ldexp_f32 v3, v6, v3 ; VI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -1425,7 +1425,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cvt_f32_f16_e64 v1, |s2| ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v2, v1 ; CI-NEXT: s_cbranch_vccz .LBB10_2 -; CI-NEXT: ; %bb.1: ; %frem.else +; CI-NEXT: ; %bb.1: ; %frem.else86 ; CI-NEXT: s_and_b32 s0, s4, 0x8000 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v2, v1 ; CI-NEXT: v_mov_b32_e32 v0, s0 @@ -1436,7 +1436,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s0, s0, 1 ; CI-NEXT: s_cmp_lg_u32 s0, 0 ; CI-NEXT: s_cbranch_scc1 
.LBB10_8 -; CI-NEXT: ; %bb.3: ; %frem.compute +; CI-NEXT: ; %bb.3: ; %frem.compute85 ; CI-NEXT: v_frexp_mant_f32_e32 v3, v1 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v6, v1 ; CI-NEXT: v_ldexp_f32_e64 v1, v3, 1 @@ -1461,10 +1461,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v2 ; CI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB10_6 -; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; CI-NEXT: ; %bb.4: ; %frem.loop_body93.preheader ; CI-NEXT: v_add_i32_e32 v2, vcc, 11, v5 ; CI-NEXT: v_sub_i32_e32 v2, vcc, v2, v6 -; CI-NEXT: .LBB10_5: ; %frem.loop_body +; CI-NEXT: .LBB10_5: ; %frem.loop_body93 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v5, v4 ; CI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -1480,7 +1480,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB10_7 ; CI-NEXT: .LBB10_6: ; CI-NEXT: v_mov_b32_e32 v5, v4 -; CI-NEXT: .LBB10_7: ; %frem.loop_exit +; CI-NEXT: .LBB10_7: ; %frem.loop_exit94 ; CI-NEXT: v_add_i32_e32 v2, vcc, -10, v2 ; CI-NEXT: v_ldexp_f32_e32 v2, v5, v2 ; CI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -1503,7 +1503,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: ; implicit-def: $vgpr1 ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v3, v2 ; CI-NEXT: s_cbranch_vccz .LBB10_10 -; CI-NEXT: ; %bb.9: ; %frem.else20 +; CI-NEXT: ; %bb.9: ; %frem.else53 ; CI-NEXT: s_and_b32 s1, s6, 0x8000 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v3, v2 ; CI-NEXT: v_mov_b32_e32 v1, s1 @@ -1514,7 +1514,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s1, s1, 1 ; CI-NEXT: s_cmp_lg_u32 s1, 0 ; CI-NEXT: s_cbranch_scc1 .LBB10_16 -; CI-NEXT: ; %bb.11: ; %frem.compute19 +; CI-NEXT: ; %bb.11: ; %frem.compute52 ; CI-NEXT: v_frexp_mant_f32_e32 v4, v2 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v7, v2 ; CI-NEXT: v_ldexp_f32_e64 v2, v4, 1 @@ -1539,10 +1539,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v3 ; CI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB10_14 -; CI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; CI-NEXT: ; %bb.12: ; %frem.loop_body60.preheader ; CI-NEXT: v_add_i32_e32 v3, vcc, 11, v6 ; CI-NEXT: v_sub_i32_e32 v3, vcc, v3, v7 -; CI-NEXT: .LBB10_13: ; %frem.loop_body27 +; CI-NEXT: .LBB10_13: ; %frem.loop_body60 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v6, v5 ; CI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -1558,7 +1558,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB10_15 ; CI-NEXT: .LBB10_14: ; CI-NEXT: v_mov_b32_e32 v6, v5 -; CI-NEXT: .LBB10_15: ; %frem.loop_exit28 +; CI-NEXT: .LBB10_15: ; %frem.loop_exit61 ; CI-NEXT: v_add_i32_e32 v3, vcc, -10, v3 ; CI-NEXT: v_ldexp_f32_e32 v3, v6, v3 ; CI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -1579,7 +1579,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: ; implicit-def: $vgpr2 ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v4, v3 ; CI-NEXT: s_cbranch_vccz .LBB10_18 -; CI-NEXT: ; %bb.17: ; %frem.else53 +; CI-NEXT: ; %bb.17: ; %frem.else20 ; CI-NEXT: s_and_b32 s1, s5, 0x8000 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v4, v3 ; CI-NEXT: v_mov_b32_e32 v2, s1 @@ -1590,7 +1590,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s1, s1, 1 ; CI-NEXT: s_cmp_lg_u32 s1, 0 ; CI-NEXT: 
s_cbranch_scc1 .LBB10_24 -; CI-NEXT: ; %bb.19: ; %frem.compute52 +; CI-NEXT: ; %bb.19: ; %frem.compute19 ; CI-NEXT: v_frexp_mant_f32_e32 v5, v3 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v8, v3 ; CI-NEXT: v_ldexp_f32_e64 v3, v5, 1 @@ -1615,10 +1615,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v4 ; CI-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB10_22 -; CI-NEXT: ; %bb.20: ; %frem.loop_body60.preheader +; CI-NEXT: ; %bb.20: ; %frem.loop_body27.preheader ; CI-NEXT: v_add_i32_e32 v4, vcc, 11, v7 ; CI-NEXT: v_sub_i32_e32 v4, vcc, v4, v8 -; CI-NEXT: .LBB10_21: ; %frem.loop_body60 +; CI-NEXT: .LBB10_21: ; %frem.loop_body27 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v7, v6 ; CI-NEXT: v_mul_f32_e32 v6, v7, v5 @@ -1634,7 +1634,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB10_23 ; CI-NEXT: .LBB10_22: ; CI-NEXT: v_mov_b32_e32 v7, v6 -; CI-NEXT: .LBB10_23: ; %frem.loop_exit61 +; CI-NEXT: .LBB10_23: ; %frem.loop_exit28 ; CI-NEXT: v_add_i32_e32 v4, vcc, -10, v4 ; CI-NEXT: v_ldexp_f32_e32 v4, v7, v4 ; CI-NEXT: v_mul_f32_e32 v5, v4, v5 @@ -1657,7 +1657,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: ; implicit-def: $vgpr3 ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v5, v4 ; CI-NEXT: s_cbranch_vccz .LBB10_26 -; CI-NEXT: ; %bb.25: ; %frem.else86 +; CI-NEXT: ; %bb.25: ; %frem.else ; CI-NEXT: s_and_b32 s1, s7, 0x8000 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v5, v4 ; CI-NEXT: v_mov_b32_e32 v3, s1 @@ -1668,7 +1668,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s1, s1, 1 ; CI-NEXT: s_cmp_lg_u32 s1, 0 ; CI-NEXT: s_cbranch_scc1 .LBB10_32 -; CI-NEXT: ; %bb.27: ; %frem.compute85 +; CI-NEXT: ; %bb.27: ; %frem.compute ; CI-NEXT: v_frexp_mant_f32_e32 v6, v4 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v9, v4 ; CI-NEXT: v_ldexp_f32_e64 v4, v6, 1 @@ -1693,10 +1693,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v5 ; CI-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB10_30 -; CI-NEXT: ; %bb.28: ; %frem.loop_body93.preheader +; CI-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; CI-NEXT: v_add_i32_e32 v5, vcc, 11, v8 ; CI-NEXT: v_sub_i32_e32 v5, vcc, v5, v9 -; CI-NEXT: .LBB10_29: ; %frem.loop_body93 +; CI-NEXT: .LBB10_29: ; %frem.loop_body ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v8, v7 ; CI-NEXT: v_mul_f32_e32 v7, v8, v6 @@ -1712,7 +1712,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB10_31 ; CI-NEXT: .LBB10_30: ; CI-NEXT: v_mov_b32_e32 v8, v7 -; CI-NEXT: .LBB10_31: ; %frem.loop_exit94 +; CI-NEXT: .LBB10_31: ; %frem.loop_exit ; CI-NEXT: v_add_i32_e32 v5, vcc, -10, v5 ; CI-NEXT: v_ldexp_f32_e32 v5, v8, v5 ; CI-NEXT: v_mul_f32_e32 v6, v5, v6 @@ -1791,7 +1791,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cvt_f32_f16_e64 v1, |s6| ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v2, v1 ; VI-NEXT: s_cbranch_vccz .LBB10_2 -; VI-NEXT: ; %bb.1: ; %frem.else +; VI-NEXT: ; %bb.1: ; %frem.else86 ; VI-NEXT: s_and_b32 s0, s8, 0x8000 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v2, v1 ; VI-NEXT: v_mov_b32_e32 v0, s0 @@ -1802,7 +1802,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s0, s0, 1 ; VI-NEXT: 
s_cmp_lg_u32 s0, 0 ; VI-NEXT: s_cbranch_scc1 .LBB10_8 -; VI-NEXT: ; %bb.3: ; %frem.compute +; VI-NEXT: ; %bb.3: ; %frem.compute85 ; VI-NEXT: v_frexp_mant_f32_e32 v3, v1 ; VI-NEXT: v_frexp_exp_i32_f32_e32 v6, v1 ; VI-NEXT: v_ldexp_f32 v1, v3, 1 @@ -1827,10 +1827,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v2 ; VI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB10_6 -; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; VI-NEXT: ; %bb.4: ; %frem.loop_body93.preheader ; VI-NEXT: v_add_u32_e32 v2, vcc, 11, v5 ; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v6 -; VI-NEXT: .LBB10_5: ; %frem.loop_body +; VI-NEXT: .LBB10_5: ; %frem.loop_body93 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v5, v4 ; VI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -1846,7 +1846,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB10_7 ; VI-NEXT: .LBB10_6: ; VI-NEXT: v_mov_b32_e32 v5, v4 -; VI-NEXT: .LBB10_7: ; %frem.loop_exit +; VI-NEXT: .LBB10_7: ; %frem.loop_exit94 ; VI-NEXT: v_add_u32_e32 v2, vcc, -10, v2 ; VI-NEXT: v_ldexp_f32 v2, v5, v2 ; VI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -1869,7 +1869,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: ; implicit-def: $vgpr1 ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v3, v2 ; VI-NEXT: s_cbranch_vccz .LBB10_10 -; VI-NEXT: ; %bb.9: ; %frem.else20 +; VI-NEXT: ; %bb.9: ; %frem.else53 ; VI-NEXT: s_and_b32 s0, s4, 0x8000 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v3, v2 ; VI-NEXT: v_mov_b32_e32 v1, s0 @@ -1880,7 +1880,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s0, s0, 1 ; VI-NEXT: s_cmp_lg_u32 s0, 0 ; VI-NEXT: s_cbranch_scc1 .LBB10_16 -; VI-NEXT: ; %bb.11: ; %frem.compute19 +; VI-NEXT: ; %bb.11: ; %frem.compute52 ; VI-NEXT: v_frexp_mant_f32_e32 v4, v2 ; VI-NEXT: v_frexp_exp_i32_f32_e32 v7, v2 ; VI-NEXT: v_ldexp_f32 v2, v4, 1 @@ -1905,10 +1905,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v3 ; VI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB10_14 -; VI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; VI-NEXT: ; %bb.12: ; %frem.loop_body60.preheader ; VI-NEXT: v_add_u32_e32 v3, vcc, 11, v6 ; VI-NEXT: v_sub_u32_e32 v3, vcc, v3, v7 -; VI-NEXT: .LBB10_13: ; %frem.loop_body27 +; VI-NEXT: .LBB10_13: ; %frem.loop_body60 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v6, v5 ; VI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -1924,7 +1924,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB10_15 ; VI-NEXT: .LBB10_14: ; VI-NEXT: v_mov_b32_e32 v6, v5 -; VI-NEXT: .LBB10_15: ; %frem.loop_exit28 +; VI-NEXT: .LBB10_15: ; %frem.loop_exit61 ; VI-NEXT: v_add_u32_e32 v3, vcc, -10, v3 ; VI-NEXT: v_ldexp_f32 v3, v6, v3 ; VI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -1945,7 +1945,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: ; implicit-def: $vgpr2 ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v4, v3 ; VI-NEXT: s_cbranch_vccz .LBB10_18 -; VI-NEXT: ; %bb.17: ; %frem.else53 +; VI-NEXT: ; %bb.17: ; %frem.else20 ; VI-NEXT: s_and_b32 s0, s9, 0x8000 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v4, v3 ; VI-NEXT: v_mov_b32_e32 v2, s0 @@ -1956,7 +1956,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s0, s0, 1 ; VI-NEXT: 
s_cmp_lg_u32 s0, 0 ; VI-NEXT: s_cbranch_scc1 .LBB10_24 -; VI-NEXT: ; %bb.19: ; %frem.compute52 +; VI-NEXT: ; %bb.19: ; %frem.compute19 ; VI-NEXT: v_frexp_mant_f32_e32 v5, v3 ; VI-NEXT: v_frexp_exp_i32_f32_e32 v8, v3 ; VI-NEXT: v_ldexp_f32 v3, v5, 1 @@ -1981,10 +1981,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v4 ; VI-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB10_22 -; VI-NEXT: ; %bb.20: ; %frem.loop_body60.preheader +; VI-NEXT: ; %bb.20: ; %frem.loop_body27.preheader ; VI-NEXT: v_add_u32_e32 v4, vcc, 11, v7 ; VI-NEXT: v_sub_u32_e32 v4, vcc, v4, v8 -; VI-NEXT: .LBB10_21: ; %frem.loop_body60 +; VI-NEXT: .LBB10_21: ; %frem.loop_body27 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v7, v6 ; VI-NEXT: v_mul_f32_e32 v6, v7, v5 @@ -2000,7 +2000,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB10_23 ; VI-NEXT: .LBB10_22: ; VI-NEXT: v_mov_b32_e32 v7, v6 -; VI-NEXT: .LBB10_23: ; %frem.loop_exit61 +; VI-NEXT: .LBB10_23: ; %frem.loop_exit28 ; VI-NEXT: v_add_u32_e32 v4, vcc, -10, v4 ; VI-NEXT: v_ldexp_f32 v4, v7, v4 ; VI-NEXT: v_mul_f32_e32 v5, v4, v5 @@ -2023,7 +2023,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: ; implicit-def: $vgpr3 ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v5, v4 ; VI-NEXT: s_cbranch_vccz .LBB10_26 -; VI-NEXT: ; %bb.25: ; %frem.else86 +; VI-NEXT: ; %bb.25: ; %frem.else ; VI-NEXT: s_and_b32 s0, s12, 0x8000 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v5, v4 ; VI-NEXT: v_mov_b32_e32 v3, s0 @@ -2034,7 +2034,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s0, s0, 1 ; VI-NEXT: s_cmp_lg_u32 s0, 0 ; VI-NEXT: s_cbranch_scc1 .LBB10_32 -; VI-NEXT: ; %bb.27: ; %frem.compute85 +; VI-NEXT: ; %bb.27: ; %frem.compute ; VI-NEXT: v_frexp_mant_f32_e32 v6, v4 ; VI-NEXT: v_frexp_exp_i32_f32_e32 v9, v4 ; VI-NEXT: v_ldexp_f32 v4, v6, 1 @@ -2059,10 +2059,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 11, v5 ; VI-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB10_30 -; VI-NEXT: ; %bb.28: ; %frem.loop_body93.preheader +; VI-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; VI-NEXT: v_add_u32_e32 v5, vcc, 11, v8 ; VI-NEXT: v_sub_u32_e32 v5, vcc, v5, v9 -; VI-NEXT: .LBB10_29: ; %frem.loop_body93 +; VI-NEXT: .LBB10_29: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v8, v7 ; VI-NEXT: v_mul_f32_e32 v7, v8, v6 @@ -2078,7 +2078,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB10_31 ; VI-NEXT: .LBB10_30: ; VI-NEXT: v_mov_b32_e32 v8, v7 -; VI-NEXT: .LBB10_31: ; %frem.loop_exit94 +; VI-NEXT: .LBB10_31: ; %frem.loop_exit ; VI-NEXT: v_add_u32_e32 v5, vcc, -10, v5 ; VI-NEXT: v_ldexp_f32 v5, v8, v5 ; VI-NEXT: v_mul_f32_e32 v6, v5, v6 @@ -2144,7 +2144,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ngt_f32_e64 vcc, |s2|, |v0| ; CI-NEXT: ; implicit-def: $vgpr0 ; CI-NEXT: s_cbranch_vccz .LBB11_2 -; CI-NEXT: ; %bb.1: ; %frem.else +; CI-NEXT: ; %bb.1: ; %frem.else16 ; CI-NEXT: s_and_b32 s6, s2, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v1, s4 ; CI-NEXT: v_mov_b32_e32 v0, s2 @@ -2156,7 +2156,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s6, s6, 1 ; 
CI-NEXT: s_cmp_lg_u32 s6, 0 ; CI-NEXT: s_cbranch_scc1 .LBB11_8 -; CI-NEXT: ; %bb.3: ; %frem.compute +; CI-NEXT: ; %bb.3: ; %frem.compute15 ; CI-NEXT: v_frexp_mant_f32_e64 v1, |s4| ; CI-NEXT: v_ldexp_f32_e64 v1, v1, 1 ; CI-NEXT: v_div_scale_f32 v3, s[6:7], v1, v1, 1.0 @@ -2181,10 +2181,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v2 ; CI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB11_6 -; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; CI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; CI-NEXT: v_add_i32_e32 v2, vcc, 12, v5 ; CI-NEXT: v_sub_i32_e32 v2, vcc, v2, v6 -; CI-NEXT: .LBB11_5: ; %frem.loop_body +; CI-NEXT: .LBB11_5: ; %frem.loop_body23 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v5, v4 ; CI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -2200,7 +2200,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB11_7 ; CI-NEXT: .LBB11_6: ; CI-NEXT: v_mov_b32_e32 v5, v4 -; CI-NEXT: .LBB11_7: ; %frem.loop_exit +; CI-NEXT: .LBB11_7: ; %frem.loop_exit24 ; CI-NEXT: v_add_i32_e32 v2, vcc, -11, v2 ; CI-NEXT: v_ldexp_f32_e32 v2, v5, v2 ; CI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -2219,7 +2219,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_mov_b32 s6, 1 ; CI-NEXT: ; implicit-def: $vgpr1 ; CI-NEXT: s_cbranch_vccz .LBB11_10 -; CI-NEXT: ; %bb.9: ; %frem.else16 +; CI-NEXT: ; %bb.9: ; %frem.else ; CI-NEXT: s_and_b32 s6, s3, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v2, s5 ; CI-NEXT: v_mov_b32_e32 v1, s3 @@ -2231,7 +2231,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s6, s6, 1 ; CI-NEXT: s_cmp_lg_u32 s6, 0 ; CI-NEXT: s_cbranch_scc1 .LBB11_16 -; CI-NEXT: ; %bb.11: ; %frem.compute15 +; CI-NEXT: ; %bb.11: ; %frem.compute ; CI-NEXT: v_frexp_mant_f32_e64 v2, |s5| ; CI-NEXT: v_ldexp_f32_e64 v2, v2, 1 ; CI-NEXT: v_div_scale_f32 v4, s[6:7], v2, v2, 1.0 @@ -2256,10 +2256,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v3 ; CI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB11_14 -; CI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; CI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; CI-NEXT: v_add_i32_e32 v3, vcc, 12, v6 ; CI-NEXT: v_sub_i32_e32 v3, vcc, v3, v7 -; CI-NEXT: .LBB11_13: ; %frem.loop_body23 +; CI-NEXT: .LBB11_13: ; %frem.loop_body ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v6, v5 ; CI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -2275,7 +2275,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB11_15 ; CI-NEXT: .LBB11_14: ; CI-NEXT: v_mov_b32_e32 v6, v5 -; CI-NEXT: .LBB11_15: ; %frem.loop_exit24 +; CI-NEXT: .LBB11_15: ; %frem.loop_exit ; CI-NEXT: v_add_i32_e32 v3, vcc, -11, v3 ; CI-NEXT: v_ldexp_f32_e32 v3, v6, v3 ; CI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -2317,7 +2317,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ngt_f32_e64 vcc, |s2|, |v0| ; VI-NEXT: ; implicit-def: $vgpr0 ; VI-NEXT: s_cbranch_vccz .LBB11_2 -; VI-NEXT: ; %bb.1: ; %frem.else +; VI-NEXT: ; %bb.1: ; %frem.else16 ; VI-NEXT: s_and_b32 s6, s2, 0x80000000 ; VI-NEXT: v_mov_b32_e32 v1, s4 ; VI-NEXT: v_mov_b32_e32 v0, s2 @@ -2329,7 +2329,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s6, s6, 1 
; VI-NEXT: s_cmp_lg_u32 s6, 0 ; VI-NEXT: s_cbranch_scc1 .LBB11_8 -; VI-NEXT: ; %bb.3: ; %frem.compute +; VI-NEXT: ; %bb.3: ; %frem.compute15 ; VI-NEXT: v_frexp_mant_f32_e64 v1, |s4| ; VI-NEXT: v_ldexp_f32 v1, v1, 1 ; VI-NEXT: v_div_scale_f32 v3, s[6:7], v1, v1, 1.0 @@ -2354,10 +2354,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v2 ; VI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB11_6 -; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; VI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; VI-NEXT: v_add_u32_e32 v2, vcc, 12, v5 ; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v6 -; VI-NEXT: .LBB11_5: ; %frem.loop_body +; VI-NEXT: .LBB11_5: ; %frem.loop_body23 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v5, v4 ; VI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -2373,7 +2373,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB11_7 ; VI-NEXT: .LBB11_6: ; VI-NEXT: v_mov_b32_e32 v5, v4 -; VI-NEXT: .LBB11_7: ; %frem.loop_exit +; VI-NEXT: .LBB11_7: ; %frem.loop_exit24 ; VI-NEXT: v_add_u32_e32 v2, vcc, -11, v2 ; VI-NEXT: v_ldexp_f32 v2, v5, v2 ; VI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -2392,7 +2392,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_mov_b32 s6, 1 ; VI-NEXT: ; implicit-def: $vgpr1 ; VI-NEXT: s_cbranch_vccz .LBB11_10 -; VI-NEXT: ; %bb.9: ; %frem.else16 +; VI-NEXT: ; %bb.9: ; %frem.else ; VI-NEXT: s_and_b32 s6, s3, 0x80000000 ; VI-NEXT: v_mov_b32_e32 v2, s5 ; VI-NEXT: v_mov_b32_e32 v1, s3 @@ -2404,7 +2404,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s6, s6, 1 ; VI-NEXT: s_cmp_lg_u32 s6, 0 ; VI-NEXT: s_cbranch_scc1 .LBB11_16 -; VI-NEXT: ; %bb.11: ; %frem.compute15 +; VI-NEXT: ; %bb.11: ; %frem.compute ; VI-NEXT: v_frexp_mant_f32_e64 v2, |s5| ; VI-NEXT: v_ldexp_f32 v2, v2, 1 ; VI-NEXT: v_div_scale_f32 v4, s[6:7], v2, v2, 1.0 @@ -2429,10 +2429,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v3 ; VI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB11_14 -; VI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; VI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; VI-NEXT: v_add_u32_e32 v3, vcc, 12, v6 ; VI-NEXT: v_sub_u32_e32 v3, vcc, v3, v7 -; VI-NEXT: .LBB11_13: ; %frem.loop_body23 +; VI-NEXT: .LBB11_13: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v6, v5 ; VI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -2448,7 +2448,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB11_15 ; VI-NEXT: .LBB11_14: ; VI-NEXT: v_mov_b32_e32 v6, v5 -; VI-NEXT: .LBB11_15: ; %frem.loop_exit24 +; VI-NEXT: .LBB11_15: ; %frem.loop_exit ; VI-NEXT: v_add_u32_e32 v3, vcc, -11, v3 ; VI-NEXT: v_ldexp_f32 v3, v6, v3 ; VI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -2498,7 +2498,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ngt_f32_e64 vcc, |s4|, |v0| ; CI-NEXT: ; implicit-def: $vgpr0 ; CI-NEXT: s_cbranch_vccz .LBB12_2 -; CI-NEXT: ; %bb.1: ; %frem.else +; CI-NEXT: ; %bb.1: ; %frem.else78 ; CI-NEXT: s_and_b32 s2, s4, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v1, s8 ; CI-NEXT: v_mov_b32_e32 v0, s4 @@ -2510,7 +2510,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s2, s2, 1 ; CI-NEXT: 
s_cmp_lg_u32 s2, 0 ; CI-NEXT: s_cbranch_scc1 .LBB12_8 -; CI-NEXT: ; %bb.3: ; %frem.compute +; CI-NEXT: ; %bb.3: ; %frem.compute77 ; CI-NEXT: v_frexp_mant_f32_e64 v1, |s8| ; CI-NEXT: v_ldexp_f32_e64 v1, v1, 1 ; CI-NEXT: v_div_scale_f32 v3, s[2:3], v1, v1, 1.0 @@ -2535,10 +2535,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v2 ; CI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB12_6 -; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; CI-NEXT: ; %bb.4: ; %frem.loop_body85.preheader ; CI-NEXT: v_add_i32_e32 v2, vcc, 12, v5 ; CI-NEXT: v_sub_i32_e32 v2, vcc, v2, v6 -; CI-NEXT: .LBB12_5: ; %frem.loop_body +; CI-NEXT: .LBB12_5: ; %frem.loop_body85 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v5, v4 ; CI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -2554,7 +2554,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB12_7 ; CI-NEXT: .LBB12_6: ; CI-NEXT: v_mov_b32_e32 v5, v4 -; CI-NEXT: .LBB12_7: ; %frem.loop_exit +; CI-NEXT: .LBB12_7: ; %frem.loop_exit86 ; CI-NEXT: v_add_i32_e32 v2, vcc, -11, v2 ; CI-NEXT: v_ldexp_f32_e32 v2, v5, v2 ; CI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -2573,7 +2573,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_mov_b32 s2, 1 ; CI-NEXT: ; implicit-def: $vgpr1 ; CI-NEXT: s_cbranch_vccz .LBB12_10 -; CI-NEXT: ; %bb.9: ; %frem.else16 +; CI-NEXT: ; %bb.9: ; %frem.else47 ; CI-NEXT: s_and_b32 s2, s5, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v2, s9 ; CI-NEXT: v_mov_b32_e32 v1, s5 @@ -2585,7 +2585,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s2, s2, 1 ; CI-NEXT: s_cmp_lg_u32 s2, 0 ; CI-NEXT: s_cbranch_scc1 .LBB12_16 -; CI-NEXT: ; %bb.11: ; %frem.compute15 +; CI-NEXT: ; %bb.11: ; %frem.compute46 ; CI-NEXT: v_frexp_mant_f32_e64 v2, |s9| ; CI-NEXT: v_ldexp_f32_e64 v2, v2, 1 ; CI-NEXT: v_div_scale_f32 v4, s[2:3], v2, v2, 1.0 @@ -2610,10 +2610,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v3 ; CI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB12_14 -; CI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; CI-NEXT: ; %bb.12: ; %frem.loop_body54.preheader ; CI-NEXT: v_add_i32_e32 v3, vcc, 12, v6 ; CI-NEXT: v_sub_i32_e32 v3, vcc, v3, v7 -; CI-NEXT: .LBB12_13: ; %frem.loop_body23 +; CI-NEXT: .LBB12_13: ; %frem.loop_body54 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v6, v5 ; CI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -2629,7 +2629,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB12_15 ; CI-NEXT: .LBB12_14: ; CI-NEXT: v_mov_b32_e32 v6, v5 -; CI-NEXT: .LBB12_15: ; %frem.loop_exit24 +; CI-NEXT: .LBB12_15: ; %frem.loop_exit55 ; CI-NEXT: v_add_i32_e32 v3, vcc, -11, v3 ; CI-NEXT: v_ldexp_f32_e32 v3, v6, v3 ; CI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -2648,7 +2648,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_mov_b32 s2, 1 ; CI-NEXT: ; implicit-def: $vgpr2 ; CI-NEXT: s_cbranch_vccz .LBB12_18 -; CI-NEXT: ; %bb.17: ; %frem.else47 +; CI-NEXT: ; %bb.17: ; %frem.else16 ; CI-NEXT: s_and_b32 s2, s6, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v3, s10 ; CI-NEXT: v_mov_b32_e32 v2, s6 @@ -2660,7 +2660,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s2, s2, 1 ; CI-NEXT: 
s_cmp_lg_u32 s2, 0 ; CI-NEXT: s_cbranch_scc1 .LBB12_24 -; CI-NEXT: ; %bb.19: ; %frem.compute46 +; CI-NEXT: ; %bb.19: ; %frem.compute15 ; CI-NEXT: v_frexp_mant_f32_e64 v3, |s10| ; CI-NEXT: v_ldexp_f32_e64 v3, v3, 1 ; CI-NEXT: v_div_scale_f32 v5, s[2:3], v3, v3, 1.0 @@ -2685,10 +2685,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v4 ; CI-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB12_22 -; CI-NEXT: ; %bb.20: ; %frem.loop_body54.preheader +; CI-NEXT: ; %bb.20: ; %frem.loop_body23.preheader ; CI-NEXT: v_add_i32_e32 v4, vcc, 12, v7 ; CI-NEXT: v_sub_i32_e32 v4, vcc, v4, v8 -; CI-NEXT: .LBB12_21: ; %frem.loop_body54 +; CI-NEXT: .LBB12_21: ; %frem.loop_body23 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v7, v6 ; CI-NEXT: v_mul_f32_e32 v6, v7, v5 @@ -2704,7 +2704,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB12_23 ; CI-NEXT: .LBB12_22: ; CI-NEXT: v_mov_b32_e32 v7, v6 -; CI-NEXT: .LBB12_23: ; %frem.loop_exit55 +; CI-NEXT: .LBB12_23: ; %frem.loop_exit24 ; CI-NEXT: v_add_i32_e32 v4, vcc, -11, v4 ; CI-NEXT: v_ldexp_f32_e32 v4, v7, v4 ; CI-NEXT: v_mul_f32_e32 v5, v4, v5 @@ -2723,7 +2723,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_mov_b32 s2, 1 ; CI-NEXT: ; implicit-def: $vgpr3 ; CI-NEXT: s_cbranch_vccz .LBB12_26 -; CI-NEXT: ; %bb.25: ; %frem.else78 +; CI-NEXT: ; %bb.25: ; %frem.else ; CI-NEXT: s_and_b32 s2, s7, 0x80000000 ; CI-NEXT: v_mov_b32_e32 v4, s11 ; CI-NEXT: v_mov_b32_e32 v3, s7 @@ -2735,7 +2735,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s2, s2, 1 ; CI-NEXT: s_cmp_lg_u32 s2, 0 ; CI-NEXT: s_cbranch_scc1 .LBB12_32 -; CI-NEXT: ; %bb.27: ; %frem.compute77 +; CI-NEXT: ; %bb.27: ; %frem.compute ; CI-NEXT: v_frexp_mant_f32_e64 v4, |s11| ; CI-NEXT: v_ldexp_f32_e64 v4, v4, 1 ; CI-NEXT: v_div_scale_f32 v6, s[2:3], v4, v4, 1.0 @@ -2760,10 +2760,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v5 ; CI-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB12_30 -; CI-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; CI-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; CI-NEXT: v_add_i32_e32 v5, vcc, 12, v8 ; CI-NEXT: v_sub_i32_e32 v5, vcc, v5, v9 -; CI-NEXT: .LBB12_29: ; %frem.loop_body85 +; CI-NEXT: .LBB12_29: ; %frem.loop_body ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v8, v7 ; CI-NEXT: v_mul_f32_e32 v7, v8, v6 @@ -2779,7 +2779,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB12_31 ; CI-NEXT: .LBB12_30: ; CI-NEXT: v_mov_b32_e32 v8, v7 -; CI-NEXT: .LBB12_31: ; %frem.loop_exit86 +; CI-NEXT: .LBB12_31: ; %frem.loop_exit ; CI-NEXT: v_add_i32_e32 v5, vcc, -11, v5 ; CI-NEXT: v_ldexp_f32_e32 v5, v8, v5 ; CI-NEXT: v_mul_f32_e32 v6, v5, v6 @@ -2829,7 +2829,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ngt_f32_e64 vcc, |s4|, |v0| ; VI-NEXT: ; implicit-def: $vgpr0 ; VI-NEXT: s_cbranch_vccz .LBB12_2 -; VI-NEXT: ; %bb.1: ; %frem.else +; VI-NEXT: ; %bb.1: ; %frem.else78 ; VI-NEXT: s_and_b32 s2, s4, 0x80000000 ; VI-NEXT: v_mov_b32_e32 v1, s8 ; VI-NEXT: v_mov_b32_e32 v0, s4 @@ -2841,7 +2841,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: 
s_xor_b32 s2, s2, 1 ; VI-NEXT: s_cmp_lg_u32 s2, 0 ; VI-NEXT: s_cbranch_scc1 .LBB12_8 -; VI-NEXT: ; %bb.3: ; %frem.compute +; VI-NEXT: ; %bb.3: ; %frem.compute77 ; VI-NEXT: v_frexp_mant_f32_e64 v1, |s8| ; VI-NEXT: v_ldexp_f32 v1, v1, 1 ; VI-NEXT: v_div_scale_f32 v3, s[2:3], v1, v1, 1.0 @@ -2866,10 +2866,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v2 ; VI-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB12_6 -; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; VI-NEXT: ; %bb.4: ; %frem.loop_body85.preheader ; VI-NEXT: v_add_u32_e32 v2, vcc, 12, v5 ; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v6 -; VI-NEXT: .LBB12_5: ; %frem.loop_body +; VI-NEXT: .LBB12_5: ; %frem.loop_body85 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v5, v4 ; VI-NEXT: v_mul_f32_e32 v4, v5, v3 @@ -2885,7 +2885,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB12_7 ; VI-NEXT: .LBB12_6: ; VI-NEXT: v_mov_b32_e32 v5, v4 -; VI-NEXT: .LBB12_7: ; %frem.loop_exit +; VI-NEXT: .LBB12_7: ; %frem.loop_exit86 ; VI-NEXT: v_add_u32_e32 v2, vcc, -11, v2 ; VI-NEXT: v_ldexp_f32 v2, v5, v2 ; VI-NEXT: v_mul_f32_e32 v3, v2, v3 @@ -2904,7 +2904,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_mov_b32 s2, 1 ; VI-NEXT: ; implicit-def: $vgpr1 ; VI-NEXT: s_cbranch_vccz .LBB12_10 -; VI-NEXT: ; %bb.9: ; %frem.else16 +; VI-NEXT: ; %bb.9: ; %frem.else47 ; VI-NEXT: s_and_b32 s2, s5, 0x80000000 ; VI-NEXT: v_mov_b32_e32 v2, s9 ; VI-NEXT: v_mov_b32_e32 v1, s5 @@ -2916,7 +2916,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s2, s2, 1 ; VI-NEXT: s_cmp_lg_u32 s2, 0 ; VI-NEXT: s_cbranch_scc1 .LBB12_16 -; VI-NEXT: ; %bb.11: ; %frem.compute15 +; VI-NEXT: ; %bb.11: ; %frem.compute46 ; VI-NEXT: v_frexp_mant_f32_e64 v2, |s9| ; VI-NEXT: v_ldexp_f32 v2, v2, 1 ; VI-NEXT: v_div_scale_f32 v4, s[2:3], v2, v2, 1.0 @@ -2941,10 +2941,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v3 ; VI-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB12_14 -; VI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; VI-NEXT: ; %bb.12: ; %frem.loop_body54.preheader ; VI-NEXT: v_add_u32_e32 v3, vcc, 12, v6 ; VI-NEXT: v_sub_u32_e32 v3, vcc, v3, v7 -; VI-NEXT: .LBB12_13: ; %frem.loop_body23 +; VI-NEXT: .LBB12_13: ; %frem.loop_body54 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v6, v5 ; VI-NEXT: v_mul_f32_e32 v5, v6, v4 @@ -2960,7 +2960,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB12_15 ; VI-NEXT: .LBB12_14: ; VI-NEXT: v_mov_b32_e32 v6, v5 -; VI-NEXT: .LBB12_15: ; %frem.loop_exit24 +; VI-NEXT: .LBB12_15: ; %frem.loop_exit55 ; VI-NEXT: v_add_u32_e32 v3, vcc, -11, v3 ; VI-NEXT: v_ldexp_f32 v3, v6, v3 ; VI-NEXT: v_mul_f32_e32 v4, v3, v4 @@ -2979,7 +2979,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_mov_b32 s2, 1 ; VI-NEXT: ; implicit-def: $vgpr2 ; VI-NEXT: s_cbranch_vccz .LBB12_18 -; VI-NEXT: ; %bb.17: ; %frem.else47 +; VI-NEXT: ; %bb.17: ; %frem.else16 ; VI-NEXT: s_and_b32 s2, s6, 0x80000000 ; VI-NEXT: v_mov_b32_e32 v3, s10 ; VI-NEXT: v_mov_b32_e32 v2, s6 @@ -2991,7 +2991,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s2, 
s2, 1 ; VI-NEXT: s_cmp_lg_u32 s2, 0 ; VI-NEXT: s_cbranch_scc1 .LBB12_24 -; VI-NEXT: ; %bb.19: ; %frem.compute46 +; VI-NEXT: ; %bb.19: ; %frem.compute15 ; VI-NEXT: v_frexp_mant_f32_e64 v3, |s10| ; VI-NEXT: v_ldexp_f32 v3, v3, 1 ; VI-NEXT: v_div_scale_f32 v5, s[2:3], v3, v3, 1.0 @@ -3016,10 +3016,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v4 ; VI-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB12_22 -; VI-NEXT: ; %bb.20: ; %frem.loop_body54.preheader +; VI-NEXT: ; %bb.20: ; %frem.loop_body23.preheader ; VI-NEXT: v_add_u32_e32 v4, vcc, 12, v7 ; VI-NEXT: v_sub_u32_e32 v4, vcc, v4, v8 -; VI-NEXT: .LBB12_21: ; %frem.loop_body54 +; VI-NEXT: .LBB12_21: ; %frem.loop_body23 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v7, v6 ; VI-NEXT: v_mul_f32_e32 v6, v7, v5 @@ -3035,7 +3035,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB12_23 ; VI-NEXT: .LBB12_22: ; VI-NEXT: v_mov_b32_e32 v7, v6 -; VI-NEXT: .LBB12_23: ; %frem.loop_exit55 +; VI-NEXT: .LBB12_23: ; %frem.loop_exit24 ; VI-NEXT: v_add_u32_e32 v4, vcc, -11, v4 ; VI-NEXT: v_ldexp_f32 v4, v7, v4 ; VI-NEXT: v_mul_f32_e32 v5, v4, v5 @@ -3054,7 +3054,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_mov_b32 s2, 1 ; VI-NEXT: ; implicit-def: $vgpr3 ; VI-NEXT: s_cbranch_vccz .LBB12_26 -; VI-NEXT: ; %bb.25: ; %frem.else78 +; VI-NEXT: ; %bb.25: ; %frem.else ; VI-NEXT: s_and_b32 s2, s7, 0x80000000 ; VI-NEXT: v_mov_b32_e32 v4, s11 ; VI-NEXT: v_mov_b32_e32 v3, s7 @@ -3066,7 +3066,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s2, s2, 1 ; VI-NEXT: s_cmp_lg_u32 s2, 0 ; VI-NEXT: s_cbranch_scc1 .LBB12_32 -; VI-NEXT: ; %bb.27: ; %frem.compute77 +; VI-NEXT: ; %bb.27: ; %frem.compute ; VI-NEXT: v_frexp_mant_f32_e64 v4, |s11| ; VI-NEXT: v_ldexp_f32 v4, v4, 1 ; VI-NEXT: v_div_scale_f32 v6, s[2:3], v4, v4, 1.0 @@ -3091,10 +3091,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 12, v5 ; VI-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB12_30 -; VI-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; VI-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; VI-NEXT: v_add_u32_e32 v5, vcc, 12, v8 ; VI-NEXT: v_sub_u32_e32 v5, vcc, v5, v9 -; VI-NEXT: .LBB12_29: ; %frem.loop_body85 +; VI-NEXT: .LBB12_29: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v8, v7 ; VI-NEXT: v_mul_f32_e32 v7, v8, v6 @@ -3110,7 +3110,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB12_31 ; VI-NEXT: .LBB12_30: ; VI-NEXT: v_mov_b32_e32 v8, v7 -; VI-NEXT: .LBB12_31: ; %frem.loop_exit86 +; VI-NEXT: .LBB12_31: ; %frem.loop_exit ; VI-NEXT: v_add_u32_e32 v5, vcc, -11, v5 ; VI-NEXT: v_ldexp_f32 v5, v8, v5 ; VI-NEXT: v_mul_f32_e32 v6, v5, v6 @@ -3169,7 +3169,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ngt_f64_e64 vcc, |s[4:5]|, |v[0:1]| ; CI-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CI-NEXT: s_cbranch_vccz .LBB13_2 -; CI-NEXT: ; %bb.1: ; %frem.else +; CI-NEXT: ; %bb.1: ; %frem.else16 ; CI-NEXT: v_mov_b32_e32 v0, s8 ; CI-NEXT: v_mov_b32_e32 v1, s9 ; CI-NEXT: v_cmp_eq_f64_e64 vcc, |s[4:5]|, |v[0:1]| @@ -3187,7 +3187,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr 
addrspace(1) %i ; CI-NEXT: s_xor_b32 s2, s2, 1 ; CI-NEXT: s_cmp_lg_u32 s2, 0 ; CI-NEXT: s_cbranch_scc1 .LBB13_8 -; CI-NEXT: ; %bb.3: ; %frem.compute +; CI-NEXT: ; %bb.3: ; %frem.compute15 ; CI-NEXT: v_frexp_mant_f64_e64 v[0:1], |s[4:5]| ; CI-NEXT: v_frexp_exp_i32_f64_e64 v6, |s[4:5]| ; CI-NEXT: v_frexp_exp_i32_f64_e64 v7, |s[8:9]| @@ -3210,10 +3210,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 26, v9 ; CI-NEXT: v_div_fixup_f64 v[2:3], v[2:3], v[0:1], 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB13_6 -; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; CI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; CI-NEXT: v_add_i32_e32 v6, vcc, 26, v6 ; CI-NEXT: v_sub_i32_e32 v9, vcc, v6, v7 -; CI-NEXT: .LBB13_5: ; %frem.loop_body +; CI-NEXT: .LBB13_5: ; %frem.loop_body23 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v7, v5 ; CI-NEXT: v_mov_b32_e32 v6, v4 @@ -3232,7 +3232,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: .LBB13_6: ; CI-NEXT: v_mov_b32_e32 v7, v5 ; CI-NEXT: v_mov_b32_e32 v6, v4 -; CI-NEXT: .LBB13_7: ; %frem.loop_exit +; CI-NEXT: .LBB13_7: ; %frem.loop_exit24 ; CI-NEXT: v_add_i32_e32 v4, vcc, 0xffffffe7, v9 ; CI-NEXT: v_ldexp_f64 v[4:5], v[6:7], v4 ; CI-NEXT: s_mov_b32 s2, 0 @@ -3256,7 +3256,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_mov_b32 s2, 1 ; CI-NEXT: ; implicit-def: $vgpr2_vgpr3 ; CI-NEXT: s_cbranch_vccz .LBB13_10 -; CI-NEXT: ; %bb.9: ; %frem.else16 +; CI-NEXT: ; %bb.9: ; %frem.else ; CI-NEXT: v_mov_b32_e32 v2, s10 ; CI-NEXT: v_mov_b32_e32 v3, s11 ; CI-NEXT: v_cmp_eq_f64_e64 vcc, |s[6:7]|, |v[2:3]| @@ -3274,7 +3274,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_xor_b32 s2, s2, 1 ; CI-NEXT: s_cmp_lg_u32 s2, 0 ; CI-NEXT: s_cbranch_scc1 .LBB13_16 -; CI-NEXT: ; %bb.11: ; %frem.compute15 +; CI-NEXT: ; %bb.11: ; %frem.compute ; CI-NEXT: v_frexp_mant_f64_e64 v[2:3], |s[6:7]| ; CI-NEXT: v_frexp_exp_i32_f64_e64 v8, |s[6:7]| ; CI-NEXT: v_frexp_exp_i32_f64_e64 v9, |s[10:11]| @@ -3297,10 +3297,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ge_i32_e32 vcc, 26, v11 ; CI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB13_14 -; CI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; CI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; CI-NEXT: v_add_i32_e32 v8, vcc, 26, v8 ; CI-NEXT: v_sub_i32_e32 v11, vcc, v8, v9 -; CI-NEXT: .LBB13_13: ; %frem.loop_body23 +; CI-NEXT: .LBB13_13: ; %frem.loop_body ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v9, v7 ; CI-NEXT: v_mov_b32_e32 v8, v6 @@ -3319,7 +3319,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: .LBB13_14: ; CI-NEXT: v_mov_b32_e32 v9, v7 ; CI-NEXT: v_mov_b32_e32 v8, v6 -; CI-NEXT: .LBB13_15: ; %frem.loop_exit24 +; CI-NEXT: .LBB13_15: ; %frem.loop_exit ; CI-NEXT: v_add_i32_e32 v6, vcc, 0xffffffe7, v11 ; CI-NEXT: v_ldexp_f64 v[6:7], v[8:9], v6 ; CI-NEXT: s_mov_b32 s2, 0 @@ -3371,7 +3371,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ngt_f64_e64 vcc, |s[4:5]|, |v[0:1]| ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1 ; VI-NEXT: s_cbranch_vccz .LBB13_2 -; VI-NEXT: ; %bb.1: ; %frem.else +; VI-NEXT: ; %bb.1: ; %frem.else16 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s9 ; VI-NEXT: v_cmp_eq_f64_e64 
vcc, |s[4:5]|, |v[0:1]| @@ -3389,7 +3389,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s2, s2, 1 ; VI-NEXT: s_cmp_lg_u32 s2, 0 ; VI-NEXT: s_cbranch_scc1 .LBB13_8 -; VI-NEXT: ; %bb.3: ; %frem.compute +; VI-NEXT: ; %bb.3: ; %frem.compute15 ; VI-NEXT: v_frexp_mant_f64_e64 v[0:1], |s[4:5]| ; VI-NEXT: v_frexp_exp_i32_f64_e64 v6, |s[4:5]| ; VI-NEXT: v_frexp_exp_i32_f64_e64 v7, |s[8:9]| @@ -3412,10 +3412,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 26, v9 ; VI-NEXT: v_div_fixup_f64 v[2:3], v[2:3], v[0:1], 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB13_6 -; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; VI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; VI-NEXT: v_add_u32_e32 v6, vcc, 26, v6 ; VI-NEXT: v_sub_u32_e32 v9, vcc, v6, v7 -; VI-NEXT: .LBB13_5: ; %frem.loop_body +; VI-NEXT: .LBB13_5: ; %frem.loop_body23 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v7, v5 ; VI-NEXT: v_mov_b32_e32 v6, v4 @@ -3434,7 +3434,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: .LBB13_6: ; VI-NEXT: v_mov_b32_e32 v7, v5 ; VI-NEXT: v_mov_b32_e32 v6, v4 -; VI-NEXT: .LBB13_7: ; %frem.loop_exit +; VI-NEXT: .LBB13_7: ; %frem.loop_exit24 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0xffffffe7, v9 ; VI-NEXT: v_ldexp_f64 v[4:5], v[6:7], v4 ; VI-NEXT: s_mov_b32 s2, 0 @@ -3458,7 +3458,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_mov_b32 s2, 1 ; VI-NEXT: ; implicit-def: $vgpr2_vgpr3 ; VI-NEXT: s_cbranch_vccz .LBB13_10 -; VI-NEXT: ; %bb.9: ; %frem.else16 +; VI-NEXT: ; %bb.9: ; %frem.else ; VI-NEXT: v_mov_b32_e32 v2, s10 ; VI-NEXT: v_mov_b32_e32 v3, s11 ; VI-NEXT: v_cmp_eq_f64_e64 vcc, |s[6:7]|, |v[2:3]| @@ -3476,7 +3476,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_xor_b32 s2, s2, 1 ; VI-NEXT: s_cmp_lg_u32 s2, 0 ; VI-NEXT: s_cbranch_scc1 .LBB13_16 -; VI-NEXT: ; %bb.11: ; %frem.compute15 +; VI-NEXT: ; %bb.11: ; %frem.compute ; VI-NEXT: v_frexp_mant_f64_e64 v[2:3], |s[6:7]| ; VI-NEXT: v_frexp_exp_i32_f64_e64 v8, |s[6:7]| ; VI-NEXT: v_frexp_exp_i32_f64_e64 v9, |s[10:11]| @@ -3499,10 +3499,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ge_i32_e32 vcc, 26, v11 ; VI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB13_14 -; VI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; VI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; VI-NEXT: v_add_u32_e32 v8, vcc, 26, v8 ; VI-NEXT: v_sub_u32_e32 v11, vcc, v8, v9 -; VI-NEXT: .LBB13_13: ; %frem.loop_body23 +; VI-NEXT: .LBB13_13: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v9, v7 ; VI-NEXT: v_mov_b32_e32 v8, v6 @@ -3521,7 +3521,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: .LBB13_14: ; VI-NEXT: v_mov_b32_e32 v9, v7 ; VI-NEXT: v_mov_b32_e32 v8, v6 -; VI-NEXT: .LBB13_15: ; %frem.loop_exit24 +; VI-NEXT: .LBB13_15: ; %frem.loop_exit ; VI-NEXT: v_add_u32_e32 v6, vcc, 0xffffffe7, v11 ; VI-NEXT: v_ldexp_f64 v[6:7], v[8:9], v6 ; VI-NEXT: s_mov_b32 s2, 0 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn-ieee.ll b/llvm/test/CodeGen/AMDGPU/amdgcn-ieee.ll index f96a6f7..b239c46 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn-ieee.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn-ieee.ll @@ -1,13 +1,89 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py UTC_ARGS: --version 6 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti < %s | FileCheck -check-prefix=GCN %s -; GCN-LABEL: {{^}}kernel_ieee_mode_default: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-DAG: v_mul_f32_e32 [[QUIET0:v[0-9]+]], 1.0, [[VAL0]] -; GCN-DAG: v_mul_f32_e32 [[QUIET1:v[0-9]+]], 1.0, [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[QUIET0]], [[QUIET1]] -; GCN-NOT: v_mul_f32 define amdgpu_kernel void @kernel_ieee_mode_default() #0 { +; GCN-LABEL: kernel_ieee_mode_default: +; GCN: .amd_kernel_code_t +; GCN-NEXT: amd_code_version_major = 1 +; GCN-NEXT: amd_code_version_minor = 2 +; GCN-NEXT: amd_machine_kind = 1 +; GCN-NEXT: amd_machine_version_major = 6 +; GCN-NEXT: amd_machine_version_minor = 0 +; GCN-NEXT: amd_machine_version_stepping = 0 +; GCN-NEXT: kernel_code_entry_byte_offset = 256 +; GCN-NEXT: kernel_code_prefetch_byte_size = 0 +; GCN-NEXT: granulated_workitem_vgpr_count = 0 +; GCN-NEXT: granulated_wavefront_sgpr_count = 0 +; GCN-NEXT: priority = 0 +; GCN-NEXT: float_mode = 240 +; GCN-NEXT: priv = 0 +; GCN-NEXT: enable_dx10_clamp = 1 +; GCN-NEXT: debug_mode = 0 +; GCN-NEXT: enable_ieee_mode = 1 +; GCN-NEXT: enable_wgp_mode = 0 +; GCN-NEXT: enable_mem_ordered = 0 +; GCN-NEXT: enable_fwd_progress = 0 +; GCN-NEXT: enable_sgpr_private_segment_wave_byte_offset = 0 +; GCN-NEXT: user_sgpr_count = 12 +; GCN-NEXT: enable_trap_handler = 0 +; GCN-NEXT: enable_sgpr_workgroup_id_x = 1 +; GCN-NEXT: enable_sgpr_workgroup_id_y = 1 +; GCN-NEXT: enable_sgpr_workgroup_id_z = 1 +; GCN-NEXT: enable_sgpr_workgroup_info = 0 +; GCN-NEXT: enable_vgpr_workitem_id = 2 +; GCN-NEXT: enable_exception_msb = 0 +; GCN-NEXT: granulated_lds_size = 0 +; GCN-NEXT: enable_exception = 0 +; GCN-NEXT: enable_sgpr_private_segment_buffer = 1 +; GCN-NEXT: enable_sgpr_dispatch_ptr = 1 +; GCN-NEXT: enable_sgpr_queue_ptr = 1 +; GCN-NEXT: enable_sgpr_kernarg_segment_ptr = 1 +; GCN-NEXT: enable_sgpr_dispatch_id = 1 +; GCN-NEXT: enable_sgpr_flat_scratch_init = 0 +; GCN-NEXT: enable_sgpr_private_segment_size = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_x = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_y = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_z = 0 +; GCN-NEXT: enable_wavefront_size32 = 0 +; GCN-NEXT: enable_ordered_append_gds = 0 +; GCN-NEXT: private_element_size = 1 +; GCN-NEXT: is_ptr64 = 1 +; GCN-NEXT: is_dynamic_callstack = 0 +; GCN-NEXT: is_debug_enabled = 0 +; GCN-NEXT: is_xnack_enabled = 0 +; GCN-NEXT: workitem_private_segment_byte_size = 0 +; GCN-NEXT: workgroup_group_segment_byte_size = 0 +; GCN-NEXT: gds_segment_byte_size = 0 +; GCN-NEXT: kernarg_segment_byte_size = 16 +; GCN-NEXT: workgroup_fbarrier_count = 0 +; GCN-NEXT: wavefront_sgpr_count = 4 +; GCN-NEXT: workitem_vgpr_count = 2 +; GCN-NEXT: reserved_vgpr_first = 0 +; GCN-NEXT: reserved_vgpr_count = 0 +; GCN-NEXT: reserved_sgpr_first = 0 +; GCN-NEXT: reserved_sgpr_count = 0 +; GCN-NEXT: debug_wavefront_private_segment_offset_sgpr = 0 +; GCN-NEXT: debug_private_segment_buffer_sgpr = 0 +; GCN-NEXT: kernarg_segment_alignment = 4 +; GCN-NEXT: group_segment_alignment = 4 +; GCN-NEXT: private_segment_alignment = 4 +; GCN-NEXT: wavefront_size = 6 +; GCN-NEXT: call_convention = -1 +; GCN-NEXT: runtime_loader_kernel_symbol = 0 +; GCN-NEXT: .end_amd_kernel_code_t +; GCN-NEXT: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt 
vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -15,14 +91,89 @@ define amdgpu_kernel void @kernel_ieee_mode_default() #0 { ret void } -; GCN-LABEL: {{^}}kernel_ieee_mode_on: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-DAG: v_mul_f32_e32 [[QUIET0:v[0-9]+]], 1.0, [[VAL0]] -; GCN-DAG: v_mul_f32_e32 [[QUIET1:v[0-9]+]], 1.0, [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[QUIET0]], [[QUIET1]] -; GCN-NOT: v_mul_f32 define amdgpu_kernel void @kernel_ieee_mode_on() #1 { +; GCN-LABEL: kernel_ieee_mode_on: +; GCN: .amd_kernel_code_t +; GCN-NEXT: amd_code_version_major = 1 +; GCN-NEXT: amd_code_version_minor = 2 +; GCN-NEXT: amd_machine_kind = 1 +; GCN-NEXT: amd_machine_version_major = 6 +; GCN-NEXT: amd_machine_version_minor = 0 +; GCN-NEXT: amd_machine_version_stepping = 0 +; GCN-NEXT: kernel_code_entry_byte_offset = 256 +; GCN-NEXT: kernel_code_prefetch_byte_size = 0 +; GCN-NEXT: granulated_workitem_vgpr_count = 0 +; GCN-NEXT: granulated_wavefront_sgpr_count = 0 +; GCN-NEXT: priority = 0 +; GCN-NEXT: float_mode = 240 +; GCN-NEXT: priv = 0 +; GCN-NEXT: enable_dx10_clamp = 1 +; GCN-NEXT: debug_mode = 0 +; GCN-NEXT: enable_ieee_mode = 1 +; GCN-NEXT: enable_wgp_mode = 0 +; GCN-NEXT: enable_mem_ordered = 0 +; GCN-NEXT: enable_fwd_progress = 0 +; GCN-NEXT: enable_sgpr_private_segment_wave_byte_offset = 0 +; GCN-NEXT: user_sgpr_count = 12 +; GCN-NEXT: enable_trap_handler = 0 +; GCN-NEXT: enable_sgpr_workgroup_id_x = 1 +; GCN-NEXT: enable_sgpr_workgroup_id_y = 1 +; GCN-NEXT: enable_sgpr_workgroup_id_z = 1 +; GCN-NEXT: enable_sgpr_workgroup_info = 0 +; GCN-NEXT: enable_vgpr_workitem_id = 2 +; GCN-NEXT: enable_exception_msb = 0 +; GCN-NEXT: granulated_lds_size = 0 +; GCN-NEXT: enable_exception = 0 +; GCN-NEXT: enable_sgpr_private_segment_buffer = 1 +; GCN-NEXT: enable_sgpr_dispatch_ptr = 1 +; GCN-NEXT: enable_sgpr_queue_ptr = 1 +; GCN-NEXT: enable_sgpr_kernarg_segment_ptr = 1 +; GCN-NEXT: enable_sgpr_dispatch_id = 1 +; GCN-NEXT: enable_sgpr_flat_scratch_init = 0 +; GCN-NEXT: enable_sgpr_private_segment_size = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_x = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_y = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_z = 0 +; GCN-NEXT: enable_wavefront_size32 = 0 +; GCN-NEXT: enable_ordered_append_gds = 0 +; GCN-NEXT: private_element_size = 1 +; GCN-NEXT: is_ptr64 = 1 +; GCN-NEXT: is_dynamic_callstack = 0 +; GCN-NEXT: is_debug_enabled = 0 +; GCN-NEXT: is_xnack_enabled = 0 +; GCN-NEXT: workitem_private_segment_byte_size = 0 +; GCN-NEXT: workgroup_group_segment_byte_size = 0 +; GCN-NEXT: gds_segment_byte_size = 0 +; GCN-NEXT: kernarg_segment_byte_size = 16 +; GCN-NEXT: workgroup_fbarrier_count = 0 +; GCN-NEXT: wavefront_sgpr_count = 4 +; GCN-NEXT: workitem_vgpr_count = 2 +; GCN-NEXT: reserved_vgpr_first = 0 +; GCN-NEXT: reserved_vgpr_count = 0 +; GCN-NEXT: reserved_sgpr_first = 0 +; GCN-NEXT: reserved_sgpr_count = 0 +; GCN-NEXT: debug_wavefront_private_segment_offset_sgpr = 0 +; GCN-NEXT: debug_private_segment_buffer_sgpr = 0 +; GCN-NEXT: 
kernarg_segment_alignment = 4 +; GCN-NEXT: group_segment_alignment = 4 +; GCN-NEXT: private_segment_alignment = 4 +; GCN-NEXT: wavefront_size = 6 +; GCN-NEXT: call_convention = -1 +; GCN-NEXT: runtime_loader_kernel_symbol = 0 +; GCN-NEXT: .end_amd_kernel_code_t +; GCN-NEXT: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -30,14 +181,87 @@ define amdgpu_kernel void @kernel_ieee_mode_on() #1 { ret void } -; GCN-LABEL: {{^}}kernel_ieee_mode_off: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-NOT: [[VAL0]] -; GCN-NOT: [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[VAL0]], [[VAL1]] -; GCN-NOT: v_mul_f32 define amdgpu_kernel void @kernel_ieee_mode_off() #2 { +; GCN-LABEL: kernel_ieee_mode_off: +; GCN: .amd_kernel_code_t +; GCN-NEXT: amd_code_version_major = 1 +; GCN-NEXT: amd_code_version_minor = 2 +; GCN-NEXT: amd_machine_kind = 1 +; GCN-NEXT: amd_machine_version_major = 6 +; GCN-NEXT: amd_machine_version_minor = 0 +; GCN-NEXT: amd_machine_version_stepping = 0 +; GCN-NEXT: kernel_code_entry_byte_offset = 256 +; GCN-NEXT: kernel_code_prefetch_byte_size = 0 +; GCN-NEXT: granulated_workitem_vgpr_count = 0 +; GCN-NEXT: granulated_wavefront_sgpr_count = 0 +; GCN-NEXT: priority = 0 +; GCN-NEXT: float_mode = 240 +; GCN-NEXT: priv = 0 +; GCN-NEXT: enable_dx10_clamp = 1 +; GCN-NEXT: debug_mode = 0 +; GCN-NEXT: enable_ieee_mode = 0 +; GCN-NEXT: enable_wgp_mode = 0 +; GCN-NEXT: enable_mem_ordered = 0 +; GCN-NEXT: enable_fwd_progress = 0 +; GCN-NEXT: enable_sgpr_private_segment_wave_byte_offset = 0 +; GCN-NEXT: user_sgpr_count = 12 +; GCN-NEXT: enable_trap_handler = 0 +; GCN-NEXT: enable_sgpr_workgroup_id_x = 1 +; GCN-NEXT: enable_sgpr_workgroup_id_y = 1 +; GCN-NEXT: enable_sgpr_workgroup_id_z = 1 +; GCN-NEXT: enable_sgpr_workgroup_info = 0 +; GCN-NEXT: enable_vgpr_workitem_id = 2 +; GCN-NEXT: enable_exception_msb = 0 +; GCN-NEXT: granulated_lds_size = 0 +; GCN-NEXT: enable_exception = 0 +; GCN-NEXT: enable_sgpr_private_segment_buffer = 1 +; GCN-NEXT: enable_sgpr_dispatch_ptr = 1 +; GCN-NEXT: enable_sgpr_queue_ptr = 1 +; GCN-NEXT: enable_sgpr_kernarg_segment_ptr = 1 +; GCN-NEXT: enable_sgpr_dispatch_id = 1 +; GCN-NEXT: enable_sgpr_flat_scratch_init = 0 +; GCN-NEXT: enable_sgpr_private_segment_size = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_x = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_y = 0 +; GCN-NEXT: enable_sgpr_grid_workgroup_count_z = 0 +; GCN-NEXT: enable_wavefront_size32 = 0 +; GCN-NEXT: enable_ordered_append_gds = 0 +; GCN-NEXT: private_element_size = 1 +; GCN-NEXT: is_ptr64 = 1 +; GCN-NEXT: is_dynamic_callstack = 0 +; GCN-NEXT: is_debug_enabled = 0 +; GCN-NEXT: is_xnack_enabled = 0 +; GCN-NEXT: workitem_private_segment_byte_size = 0 +; GCN-NEXT: workgroup_group_segment_byte_size = 0 +; GCN-NEXT: gds_segment_byte_size = 0 +; GCN-NEXT: kernarg_segment_byte_size = 16 +; GCN-NEXT: workgroup_fbarrier_count = 0 +; GCN-NEXT: 
wavefront_sgpr_count = 4 +; GCN-NEXT: workitem_vgpr_count = 2 +; GCN-NEXT: reserved_vgpr_first = 0 +; GCN-NEXT: reserved_vgpr_count = 0 +; GCN-NEXT: reserved_sgpr_first = 0 +; GCN-NEXT: reserved_sgpr_count = 0 +; GCN-NEXT: debug_wavefront_private_segment_offset_sgpr = 0 +; GCN-NEXT: debug_private_segment_buffer_sgpr = 0 +; GCN-NEXT: kernarg_segment_alignment = 4 +; GCN-NEXT: group_segment_alignment = 4 +; GCN-NEXT: private_segment_alignment = 4 +; GCN-NEXT: wavefront_size = 6 +; GCN-NEXT: call_convention = -1 +; GCN-NEXT: runtime_loader_kernel_symbol = 0 +; GCN-NEXT: .end_amd_kernel_code_t +; GCN-NEXT: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -45,14 +269,22 @@ define amdgpu_kernel void @kernel_ieee_mode_off() #2 { ret void } -; GCN-LABEL: {{^}}func_ieee_mode_default: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-DAG: v_mul_f32_e32 [[QUIET0:v[0-9]+]], 1.0, [[VAL0]] -; GCN-DAG: v_mul_f32_e32 [[QUIET1:v[0-9]+]], 1.0, [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[QUIET0]], [[QUIET1]] -; GCN-NOT: v_mul_f32 define void @func_ieee_mode_default() #0 { +; GCN-LABEL: func_ieee_mode_default: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[4:7], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[4:7], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GCN-NEXT: s_setpc_b64 s[30:31] %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -60,14 +292,22 @@ define void @func_ieee_mode_default() #0 { ret void } -; GCN-LABEL: {{^}}func_ieee_mode_on: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-DAG: v_mul_f32_e32 [[QUIET0:v[0-9]+]], 1.0, [[VAL0]] -; GCN-DAG: v_mul_f32_e32 [[QUIET1:v[0-9]+]], 1.0, [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[QUIET0]], [[QUIET1]] -; GCN-NOT: v_mul_f32 define void @func_ieee_mode_on() #1 { +; GCN-LABEL: func_ieee_mode_on: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[4:7], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[4:7], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GCN-NEXT: s_setpc_b64 s[30:31] %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr 
addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -75,14 +315,20 @@ define void @func_ieee_mode_on() #1 { ret void } -; GCN-LABEL: {{^}}func_ieee_mode_off: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-NOT: [[VAL0]] -; GCN-NOT: [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[VAL0]], [[VAL1]] -; GCN-NOT: v_mul_f32 define void @func_ieee_mode_off() #2 { +; GCN-LABEL: func_ieee_mode_off: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[4:7], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[4:7], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GCN-NEXT: s_setpc_b64 s[30:31] %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -90,14 +336,19 @@ define void @func_ieee_mode_off() #2 { ret void } -; GCN-LABEL: {{^}}cs_ieee_mode_default: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-NOT: [[VAL0]] -; GCN-NOT: [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[QUIET0]], [[QUIET1]] -; GCN-NOT: v_mul_f32 define amdgpu_cs void @cs_ieee_mode_default() #0 { +; GCN-LABEL: cs_ieee_mode_default: +; GCN: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -105,14 +356,21 @@ define amdgpu_cs void @cs_ieee_mode_default() #0 { ret void } -; GCN-LABEL: {{^}}cs_ieee_mode_on: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-DAG: v_mul_f32_e32 [[QUIET0:v[0-9]+]], 1.0, [[VAL0]] -; GCN-DAG: v_mul_f32_e32 [[QUIET1:v[0-9]+]], 1.0, [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[QUIET0]], [[QUIET1]] -; GCN-NOT: v_mul_f32 define amdgpu_cs void @cs_ieee_mode_on() #1 { +; GCN-LABEL: cs_ieee_mode_on: +; GCN: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -120,14 +378,19 @@ define amdgpu_cs void @cs_ieee_mode_on() #1 { ret void } -; GCN-LABEL: {{^}}cs_ieee_mode_off: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-NOT: 
[[VAL0]] -; GCN-NOT: [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[VAL0]], [[VAL1]] -; GCN-NOT: v_mul_f32 define amdgpu_cs void @cs_ieee_mode_off() #2 { +; GCN-LABEL: cs_ieee_mode_off: +; GCN: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -135,14 +398,19 @@ define amdgpu_cs void @cs_ieee_mode_off() #2 { ret void } -; GCN-LABEL: {{^}}ps_ieee_mode_default: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-NOT: [[VAL0]] -; GCN-NOT: [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[VAL0]], [[VAL1]] -; GCN-NOT: v_mul_f32 define amdgpu_ps void @ps_ieee_mode_default() #0 { +; GCN-LABEL: ps_ieee_mode_default: +; GCN: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -150,14 +418,21 @@ define amdgpu_ps void @ps_ieee_mode_default() #0 { ret void } -; GCN-LABEL: {{^}}ps_ieee_mode_on: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-DAG: v_mul_f32_e32 [[QUIET0:v[0-9]+]], 1.0, [[VAL0]] -; GCN-DAG: v_mul_f32_e32 [[QUIET1:v[0-9]+]], 1.0, [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[QUIET0]], [[QUIET1]] -; GCN-NOT: v_mul_f32 define amdgpu_ps void @ps_ieee_mode_on() #1 { +; GCN-LABEL: ps_ieee_mode_on: +; GCN: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) @@ -165,14 +440,19 @@ define amdgpu_ps void @ps_ieee_mode_on() #1 { ret void } -; GCN-LABEL: {{^}}ps_ieee_mode_off: -; GCN: {{buffer|global|flat}}_load_dword [[VAL0:v[0-9]+]] -; GCN: {{buffer|global|flat}}_load_dword [[VAL1:v[0-9]+]] -; GCN-NOT: [[VAL0]] -; GCN-NOT: [[VAL1]] -; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], [[VAL0]], [[VAL1]] -; GCN-NOT: v_mul_f32 define amdgpu_ps void @ps_ieee_mode_off() #2 { +; GCN-LABEL: ps_ieee_mode_off: +; GCN: ; %bb.0: +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: 
buffer_load_dword v1, off, s[0:3], 0 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_min_f32_e32 v0, v0, v1 +; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_endpgm %val0 = load volatile float, ptr addrspace(1) poison %val1 = load volatile float, ptr addrspace(1) poison %min = call float @llvm.minnum.f32(float %val0, float %val1) diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll index df9c97f..117af95 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll @@ -6551,271 +6551,205 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v39.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v66.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v1.l, v33.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v162.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v39.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v161.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v39, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v2.l, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v65.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v65.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v39, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v3.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v36.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v39, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, 
v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v145.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v39, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v39, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v52.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v39, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: 
v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v39, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v39, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v16.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v49.l
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l
-; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v39, v16
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l
-; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l
-; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l
-; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l
-; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v39, v18
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l
-; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l
-; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l
-; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v39, v20
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l
-; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l
-; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v37.l
-; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l
-; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v39, v22
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l
-; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l
-; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l
-; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l
-; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v39, v24
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l
-; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l
-; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v84.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l
-; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v39, v26
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v83.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v82.l
-; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l
-; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v81.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l
-; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l
-; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v39, v28
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v29.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v34.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v80.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v71.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l
-; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v39, v29
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v30.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v34.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v33.l
-; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v32.l
-; GFX11-TRUE16-NEXT: v_and_b16 v32.h, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v39, v30
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v31.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v33.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v69.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v68.l
 ; GFX11-TRUE16-NEXT: s_clause 0x1
 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v39, v31
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v32.l, v33.l
-; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v32.h, v33.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v39, v32
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v133.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v132.l
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v11.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v52.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v131.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v12.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v130.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v33.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v9.h, v34.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v5.h, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.h, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v129.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v13.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v51.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v14.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v119.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v118.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v10.l, v35.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.l, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.l, v8.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v9.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v15.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v50.l
+; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v16.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v117.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v16.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v116.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v17.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v115.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v49.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.h, v9.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v12.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v18.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v114.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v18.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v113.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v112.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v19.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v48.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v20.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v13.l, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v14.l, v14.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v20.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v102.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v21.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v101.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v21.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v38.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v22.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v100.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v22.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v99.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.h, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.h, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v15.h, v16.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v16.h, v17.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v23.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v98.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v23.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v37.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v24.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v24.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v25.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v87.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v15.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.l, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v19.l, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v25.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v36.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v26.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v86.l
+; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v26.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v85.l
+; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v84.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v27.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v35.l
+; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v18.l
+; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.h, v19.l
+; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.h, v20.l
+; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v20.h, v21.l
+; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v21.h, v22.l
+; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v28.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v83.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v28.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v82.l
+; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v29.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v81.l
+; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v29.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v34.l
+; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v30.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v20.h
+; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h
+; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v22.l, v22.h
+; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v23.l, v23.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v24.l, v24.h
+; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v30.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v71.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v31.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.l, 8, v70.l
+; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v31.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v33.l
+; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v32.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.l, 8, v69.l
+; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v32.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v68.l
+; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v23.l
+; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.h, v24.l
+; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.h, v25.l
+; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v25.h, v26.l
+; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v26.h, v27.l
 ; GFX11-TRUE16-NEXT: s_clause 0x5
-; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:32
-; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:48
-; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:64
-; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:80
-; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[25:28], off offset:96
-; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[29:32], off offset:112
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: bitcast_v32i32_to_v128i8:
@@ -15709,61 +15643,61 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v32, off, s32 offset:380
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v31, off, s32 offset:376
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v32, off, s32 offset:372
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:368
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:368
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v33, off, s32 offset:364
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v51, off, s32 offset:360
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v33, off, s32 offset:356
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:352
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:352
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v34, off, s32 offset:348
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v52, off, s32 offset:344
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v34, off, s32 offset:340
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v52, off, s32 offset:336
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v35, off, s32 offset:332
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:328
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:328
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v35, off, s32 offset:324
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:320
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:320
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v36, off, s32 offset:316
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:312
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:312
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v36, off, s32 offset:308
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:304
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, off, s32 offset:304
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v37, off, s32 offset:300
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, off, s32 offset:296
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:296
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v37, off, s32 offset:292
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:288
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:288
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v38, off, s32 offset:284
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:280
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:280
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 offset:276
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v66, off, s32 offset:272
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v39, off, s32 offset:268
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:264
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:264
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v39, off, s32 offset:260
 ; GFX11-TRUE16-NEXT: s_clause 0x1f
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:256
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:256
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v48, off, s32 offset:252
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:248
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:248
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v48, off, s32 offset:244
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:240
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:240
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v49, off, s32 offset:236
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v70, off, s32 offset:232
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v49, off, s32 offset:228
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:224
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v50, off, s32 offset:220
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:216
-; GFX11-TRUE16-NEXT: scratch_load_b32 v114, off, s32 offset:388
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 offset:8
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:16
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32 offset:24
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:32
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:40
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:48
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:56
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:64
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:72
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:80
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:88
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:96
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v113, off, s32 offset:388
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v102, off, s32 offset:104
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v102, off, s32 offset:112
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v160, off, s32 offset:120
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v160, off, s32 offset:128
@@ -15778,121 +15712,123 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v164, off, s32 offset:192
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v165, off, s32 offset:200
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v165, off, s32 offset:208
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:212
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:204
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:196
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:188
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:180
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:172
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:164
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:156
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:148
 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v81, off, s32 offset:140
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:132
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:124
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:116
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:108
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:100
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:92
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:84
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:76
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:68
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:60
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:52
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:44
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:36
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:28
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v113, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:20
 ; GFX11-TRUE16-NEXT: s_clause 0x1
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v115, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v112, off, s32 offset:12
 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v115, off, s32 offset:4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v117.l, v30.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v118.h, v28.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.l, v26.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v24.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v22.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v20.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v18.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.l, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v18.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v6.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.l, v4.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.l, v2.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v17.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v145.l, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v146.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v17.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v134.h, 8, v19.l
 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.l, 8, v21.l
 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.l, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v29.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(54)
-; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v81.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(16)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v129.l, 8, v82.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v129.h, 8, v83.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.l, 8, v83.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.h, 8, v85.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v85.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.l, 8, v87.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.h, 8, v97.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v97.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(9)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v98.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v112.h, 8, v99.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v100.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v102.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.h, 8, v161.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v99.l, 8, v161.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v162.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v162.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.l, 8, v163.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.h, 8, v163.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v164.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v164.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.l, 8, v165.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v165.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v81.h, 8, v71.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v82.l, 8, v71.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v70.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v70.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.l, 8, v68.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.h, 8, v67.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v66.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v64.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.l, 8, v55.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.h, 8, v54.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v53.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v52.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v52.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v51.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v50.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v31.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v31.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.l, 8, v25.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.l, 8, v29.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v51.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(56)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v50.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v54.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(26)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.l, 8, v67.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v66.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v66.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v71.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v71.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(18)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.h, 8, v70.h
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v113
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.h, 8, v83.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.l, 8, v84.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v84.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v85.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.l, 8, v85.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v96.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v119.l, 8, v97.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(8)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v98.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.h, 8, v100.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v101.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v101.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v115.l, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v102.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.h, 8, v160.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v161.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v161.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.l, 8, v162.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.h, 8, v162.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v163.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v163.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.l, 8, v164.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v164.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.l, 8, v165.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.h, 8, v165.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v80.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.l, 8, v68.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.h, 8, v67.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v55.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v55.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v52.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.h, 8, v52.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v31.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v31.l
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
 ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -15903,215 +15839,179 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_4
 ; GFX11-TRUE16-NEXT: .LBB14_2: ; %end
 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
 ; GFX11-TRUE16-NEXT: .LBB14_3: ; %cmp.false
 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v149.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v149.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v146.l
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v0.l, v151.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v151.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v0.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v150.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v144.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v149, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v1.h, v150.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v145.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v134.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v133.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v133.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v132.h
-; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v131.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v148.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v130.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v149, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v2.l, v148.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v2.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v134.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v134.l
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v145.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v130.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v149, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v3.l, v147.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v147.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v119.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v118.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v149, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v4.l, v144.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v4.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v133.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v117.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v129.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v129.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v128.h
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v116.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v116.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v115.h
-; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v115.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v149, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v5.l, v135.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v5.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v132.l
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v129.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v113.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v112.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v149, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v6.l, v133.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v6.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v128.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v103.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v101.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v149, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v7.l, v131.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v7.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v100.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v98.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v149, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v8.l, v129.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v8.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v116.h
-; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v114.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v96.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v96.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v149, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v9.l, v128.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v9.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v113.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v86.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v84.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v149, v9
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v10.l, v117.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v10.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v102.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v84.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v112.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v112.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v103.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v100.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v99.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v99.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v98.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v87.h
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v87.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v86.h
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v83.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v82.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v81.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v81.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v149, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v11.l, v116.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v11.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.l
-; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v99.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v80.h
-; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v80.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v149, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v12.l, v114.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v12.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v97.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v69.l
-; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v68.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v149, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v13.l, v112.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v13.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v87.h
-; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v67.l
-; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v65.h
-; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v65.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v149, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v14.l, v102.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v14.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v85.h
-; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v83.h
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v55.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v70.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v69.h
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v68.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v65.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v65.l
+; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v64.h
+; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v64.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v53.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v50.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v149, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v15.l, v100.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v49.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v82.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v49.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v48.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v149, v15
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v16.l, v98.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v16.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v48.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v39.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v39.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v149, v16
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v17.l, v97.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v17.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v70.h
-; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v68.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v38.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v38.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v149, v17
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v18.l, v87.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.l, v149.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v37.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v66.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v37.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v36.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v149, v18
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v19.l, v85.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v64.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v36.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v35.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v35.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v149, v19
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v20.l, v83.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v55.l
-; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v54.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v34.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v34.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v149, v20
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v21.l, v81.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.l, v149.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v53.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v33.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v149, v21
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v22.l, v71.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v22.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v52.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v32.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v150.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v150.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v151.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v151.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v146.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v147.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v147.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v148.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v148.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v134.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v135.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v144.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v144.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v130.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v130.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v131.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v131.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v132.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v117.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v117.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v118.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v119.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v113.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v113.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v114.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v114.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v115.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v100.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v101.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v102.l
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v102.h
+; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v17.l, v96.l
+; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v96.h
+; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.l, v97.l
+; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v97.h
+; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v19.l, v98.l
+; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v83.h
+; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v84.l
+; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v84.h
+; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v21.l, v85.l
+; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v85.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v22.l, v70.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.l
+; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.l, v71.h
+; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v80.l
+; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v24.l, v80.h
+; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v66.l
+; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v25.l, v66.h
+; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v67.l
+; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v26.l, v67.h
+; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v68.l
+; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v27.l, v53.h
+; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v54.l
+; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v28.l, v54.h
+; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v55.l
+; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v29.l, v55.h
+; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v50.h
+; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v30.l, v51.l
+; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v51.h
+; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v31.l, v52.l
+; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v52.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16
@@ -16133,433 +16033,329 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v149, v22
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v23.l, v70.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v23.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v51.l
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v149, v23
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v24.l, v67.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v149, v24
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v25.l, v66.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v149, v25
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v26.l, v64.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v149, v26
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v27.l, v54.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v149, v27
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v28.l, v53.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v149, v28
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v29.l, v52.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v149, v29
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v30.l, v51.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v149, v30
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v31.l, v50.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v149, v31
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
 ; GFX11-TRUE16-NEXT: .LBB14_4: ; %cmp.true
 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v149.h, 3
 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v149.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v146.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v145.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v133.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v133.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v129.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v129.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v128.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v128.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v116.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v116.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v112.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v103.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v99.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v99.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v98.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v87.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v87.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v86.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v83.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, v82.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v82.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v81.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, v70.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, v69.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v68.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, v65.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, v65.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v64.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v64.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, v53.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, v50.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, v48.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, v48.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, v38.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, v38.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, v36.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, v36.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, v34.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, v34.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, v32.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, v32.l, 3
 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v151.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v151.h, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v150.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v150.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v144.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v134.h, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v31, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v148.l, v1.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.h, v1.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v31, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v31.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v147.h, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v131.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v31, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v130.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v130.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v144.h, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v145.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v31, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v119.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v118.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v117.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v133.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v133.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v31, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v131.h, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v132.l, v6.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v115.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v113.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v129.l, v7.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v129.h, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v31, v10
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v128.l, v8.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v128.h, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v103.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v11
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v101.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v117.h, v9.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v118.l, v9.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
 ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v31, v12
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v116.l, v10.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v116.h, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v99.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v98.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v13
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v96.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v96.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v11.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v11.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
 +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
 +; GFX11-TRUE16-NEXT: 
v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v14 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v112.h, v12.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v113.l, v12.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v86.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v31, v15 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v84.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v84.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v102.l, v13.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v102.h, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v16 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v100.h, v14.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.l, v14.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v82.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v17 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v16.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v80.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v80.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v98.h, v15.l -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v99.l, v15.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v31, v18 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v17.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v97.l, v16.l -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v97.h, v16.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v69.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v31, v19 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v17.h -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v68.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v67.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v87.l, v17.l -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v87.h, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l ; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v31, v20 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l -; GFX11-TRUE16-NEXT: 
v_add_nc_u16 v21.h, 0x300, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v85.l, v18.l -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v85.h, v18.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v65.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v65.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v31, v21 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v19.h -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v55.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v50.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v83.l, v19.l -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v83.h, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v31, v22 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, 0x300, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v81.h, v20.l -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v82.l, v20.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v31, v23 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v48.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v48.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.l, v21.l -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v71.h, v21.h +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v31, v24 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v23.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v70.l, v22.l -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v70.h, v22.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v31, v25 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v23.h -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v38.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v38.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v67.h, v23.l -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v68.l, v23.h +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v31, v26 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v25.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.l, v24.l -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, 
v66.h, v24.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v31, v27 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v25.h -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v36.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v36.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v64.l, v25.l -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v64.h, v25.h +; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v31, v28 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v27.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v54.h, v26.l -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v55.l, v26.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v31, v29 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v27.h -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v34.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v34.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v53.h, v27.l -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v54.l, v27.h +; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l +; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v31, v30 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v34.h, 0x300, v29.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v52.h, v28.l -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v53.l, v28.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v31, v34 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v29.h -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v32.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v32.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v51.h, v29.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v52.l, v29.h +; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v31, v33 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v32.h -; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v50.h, v30.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v51.l, v30.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v31, v33 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l -; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v32.h, 0x300, v32.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v31, v32 +; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v150.l, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v150.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v151.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v151.h, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v146.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v147.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.l, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v148.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v134.h, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v144.l, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v144.h, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v130.l, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v130.h, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v131.l, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v131.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v132.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v117.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v117.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v118.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v118.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v119.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v113.l, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v113.h, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v115.l, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v100.h, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v101.l, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.h, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v102.l, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v102.h, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v96.l, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v96.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v97.l, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v97.h, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v98.l, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v83.h, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v84.l, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v84.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v85.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v85.h, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v70.h, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v71.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v80.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v80.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v66.l, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v67.l, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v67.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v68.l, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v53.h, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v54.l, v27.h +; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v54.h, v28.l +; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v55.l, v28.h +; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v55.h, v29.l +; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v50.h, v29.h +; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v51.l, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v51.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v52.l, v31.l 
+; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v52.h, v31.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v10.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, 0x300, v16.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v16.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, 0x300, v17.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v17.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, 0x300, v18.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v18.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, 0x300, v19.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v19.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, 0x300, v20.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v20.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v21.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, 0x300, v21.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, 0x300, v22.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v22.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, 0x300, v23.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, 0x300, v23.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, 0x300, v24.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v24.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v25.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v25.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, 0x300, v26.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v26.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, 0x300, v27.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v27.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, 0x300, v28.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v28.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, 0x300, v29.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v29.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, 0x300, v30.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v30.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v31.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, 0x300, v31.h ; GFX11-TRUE16-NEXT: 
s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -42692,271 +42488,205 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v39.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v66.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v1.l, v33.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v162.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v39.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v161.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v39, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v2.l, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v65.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v65.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v39, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v3.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v36.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v39, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v145.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v39, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v39, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v52.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v39, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v39, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 
v14.h, v14.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v39, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v16.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v49.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v39, v16 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v39, v18 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v39, v20 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h -; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v39, v22 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v39, v24 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v84.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v39, v26 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v83.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v82.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v81.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v39, v28 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v29.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v80.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v71.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l -; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v39, v29 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v30.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v33.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.h, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v39, v30 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v31.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v33.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v69.l -; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v68.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v39, v31 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v32.l, v33.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v32.h, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v39, v32 +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v133.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v132.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v52.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v131.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v130.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v9.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v5.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.h, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v129.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v13.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v51.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v118.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v10.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v50.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v16.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v116.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v115.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v49.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v18.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v112.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v103.l +; 
GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v13.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v14.l, v14.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v20.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v102.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v101.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v22.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v100.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v99.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.h, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v15.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v16.h, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v98.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v23.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v37.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v24.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v87.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v19.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v36.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v26.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v86.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v26.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v85.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v84.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v35.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v20.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v21.h, v22.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v28.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v83.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v28.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v82.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v81.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v29.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v34.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v22.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v23.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v24.l, v24.h +; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v30.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v71.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v24.l, 8, v70.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v32.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.l, 8, v69.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v32.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v68.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v25.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v26.h, v27.l ; GFX11-TRUE16-NEXT: s_clause 0x5 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[25:28], off offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[29:32], off offset:112 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v32f32_to_v128i8: @@ -53003,61 +52733,61 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v32, off, s32 offset:380 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v31, off, s32 offset:376 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v32, off, s32 offset:372 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:368 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:368 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v33, off, s32 offset:364 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v51, off, s32 offset:360 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v33, off, s32 offset:356 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:352 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:352 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v34, off, s32 offset:348 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v52, off, s32 offset:344 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v34, off, s32 offset:340 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v52, off, s32 offset:336 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v35, off, s32 offset:332 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:328 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:328 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v35, off, s32 offset:324 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:320 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:320 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v36, off, s32 offset:316 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:312 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:312 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v36, off, s32 offset:308 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:304 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, off, s32 offset:304 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v37, off, s32 offset:300 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, 
off, s32 offset:296 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:296 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v37, off, s32 offset:292 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:288 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:288 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v38, off, s32 offset:284 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:280 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:280 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 offset:276 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v66, off, s32 offset:272 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v39, off, s32 offset:268 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:264 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:264 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v39, off, s32 offset:260 ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:256 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:256 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v48, off, s32 offset:252 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:248 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:248 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v48, off, s32 offset:244 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:240 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:240 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v49, off, s32 offset:236 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v70, off, s32 offset:232 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v49, off, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:224 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v50, off, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_load_b32 v114, off, s32 offset:388 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 offset:8 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_load_b32 v113, off, s32 offset:388 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:8 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:40 +; 
GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v102, off, s32 offset:104 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v102, off, s32 offset:112 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v160, off, s32 offset:120 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v160, off, s32 offset:128 @@ -53072,121 +52802,123 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v164, off, s32 offset:192 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v165, off, s32 offset:200 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v165, off, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:148 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v81, off, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v113, off, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 
offset:124
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:20
; GFX11-TRUE16-NEXT: s_clause 0x1
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v115, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v112, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v115, off, s32 offset:4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v117.l, v30.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v118.h, v28.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.l, v26.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v24.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v22.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v20.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v18.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.l, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v18.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.l, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v17.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v145.l, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v146.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v17.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v134.h, 8, v19.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.l, 8, v21.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.l, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v29.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(54)
-; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v81.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(16)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v129.l, 8, v82.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v129.h, 8, v83.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.l, 8, v83.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.h, 8, v85.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v85.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.l, 8, v87.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.h, 8, v97.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v97.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(9)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v98.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v112.h, 8, v99.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v100.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v102.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.h, 8, v161.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v99.l, 8, v161.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v162.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v162.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.l, 8, v163.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.h, 8, v163.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v164.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v164.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.l, 8, v165.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v165.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v81.h, 8, v71.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v82.l, 8, v71.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v70.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v70.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.l, 8, v68.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.h, 8, v67.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v66.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v64.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.l, 8, v55.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.h, 8, v54.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v53.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v52.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v52.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v51.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v50.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v31.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v31.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.l, 8, v25.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.l, 8, v29.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v51.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(56)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v50.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v54.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(26)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.l, 8, v67.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v66.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v66.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v71.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v71.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(18)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.h, 8, v70.h
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v113
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.h, 8, v83.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.l, 8, v84.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v84.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v85.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.l, 8, v85.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v96.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v119.l, 8, v97.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(8)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v98.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.h, 8, v100.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v101.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v101.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v115.l, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v102.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.h, 8, v160.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v161.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v161.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.l, 8, v162.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.h, 8, v162.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v163.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v163.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.l, 8, v164.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v164.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.l, 8, v165.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.h, 8, v165.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v80.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.l, 8, v68.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.h, 8, v67.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v55.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v55.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v52.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.h, 8, v52.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v31.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v31.l
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -53197,215 +52929,179 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB38_4
; GFX11-TRUE16-NEXT: .LBB38_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB38_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v149.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v146.l
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v0.l, v151.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v151.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v0.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v150.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v144.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v149, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v1.h, v150.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v145.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v134.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v133.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v133.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v132.h
-; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v131.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v148.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v130.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v149, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v2.l, v148.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v2.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v134.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v134.l
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v145.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v130.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v149, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v3.l, v147.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v147.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v119.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v118.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v149, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v4.l, v144.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v4.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v133.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v117.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v129.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v129.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v128.h
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v116.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v116.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v115.h
-; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v115.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v149, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v5.l, v135.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v5.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v132.l
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v129.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v113.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v112.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v149, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v6.l, v133.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v6.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v128.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v103.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v101.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v149, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v7.l, v131.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v7.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v100.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v98.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v149, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v8.l, v129.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v8.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v116.h
-; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v114.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v96.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v96.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v149, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v9.l, v128.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v9.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v113.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v86.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v84.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v149, v9
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v10.l, v117.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v10.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v102.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v84.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v112.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v112.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v103.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v100.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v99.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v99.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v98.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v87.h
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v87.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v86.h
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v83.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v82.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v81.h
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v81.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v149, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v11.l, v116.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v11.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.l
-; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v99.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v80.h
-; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v80.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v149, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v12.l, v114.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v12.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v97.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v69.l
-; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v68.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v149, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v13.l, v112.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v13.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v87.h
-; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v67.l
-; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v65.h
-; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v65.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v149, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v14.l, v102.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v14.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v85.h
-; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v83.h
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v55.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v70.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v69.h
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v68.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v65.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v65.l
+; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v64.h
+; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v64.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v50.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v149, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v15.l, v100.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v49.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v82.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v49.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v48.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v149, v15
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v16.l, v98.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v16.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.h
; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v48.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v39.h
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v39.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v149, v16
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v17.l, v97.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v17.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v70.h
-; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v68.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v38.h
; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v38.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v149, v17
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v18.l, v87.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.l, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v37.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v66.h
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v37.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v36.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v149, v18
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v19.l, v85.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v64.h
; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v35.h
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v35.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v149, v19
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v20.l, v83.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v55.l
-; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v54.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v34.h
; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v34.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v149, v20
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v21.l, v81.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.l, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v33.l
; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v149, v21
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v22.l, v71.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v22.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v52.l
; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v32.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v150.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v150.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v151.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v151.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v146.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v147.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v147.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v148.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v148.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v134.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v135.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v144.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v144.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v130.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v130.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v131.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v131.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v132.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v117.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v117.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v118.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v119.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v113.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v113.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v114.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v114.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v115.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v100.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v101.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v102.l
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v102.h
+; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v17.l, v96.l
+; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v96.h
+; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.l, v97.l
+; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v97.h
+; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v19.l, v98.l
+; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v83.h
+; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v84.l
+; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v84.h
+; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v21.l, v85.l
+; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v85.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v22.l, v70.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.l
+; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.l, v71.h
+; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v80.l
+; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v24.l, v80.h
+; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v66.l
+; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v25.l, v66.h
+; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v67.l
+; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v26.l, v67.h
+; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v68.l
+; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v27.l, v53.h
+; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v54.l
+; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v28.l, v54.h
+; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v55.l
+; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v29.l, v55.h
+; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v50.h
+; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v30.l, v51.l
+; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v51.h
+; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v31.l, v52.l
+; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v52.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16
@@ -53427,433 +53123,329 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v149, v22
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v23.l, v70.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v23.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v51.l
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v149, v23
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v24.l, v67.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v149, v24
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v25.l, v66.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v149, v25
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v26.l, v64.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v149, v26
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v27.l, v54.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v149, v27
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v28.l, v53.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v149, v28
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v29.l, v52.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v149, v29
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v30.l, v51.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v149, v30
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v31.l, v50.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v149, v31
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB38_2
; GFX11-TRUE16-NEXT: .LBB38_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v149.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v149.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v146.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v145.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v133.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v133.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v129.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v129.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v128.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v128.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v116.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v116.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v112.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v103.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v99.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v99.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v98.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v87.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v87.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v86.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v83.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, v82.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v82.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v81.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, v70.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, v69.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v68.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, v65.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, v65.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v64.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v64.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, v53.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, v50.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, v48.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, v48.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, v38.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, v38.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, v36.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, v36.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, v34.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, v34.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, v32.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, v32.l, 3
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v151.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v151.h, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v150.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v150.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v144.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v134.h, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v31, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v148.l, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.h, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v31, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v31.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v147.h, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v131.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v31, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v130.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v130.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v144.h, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v145.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v31, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v119.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v118.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v117.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v133.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v133.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v31, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v131.h, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v132.l, v6.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v115.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v113.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v129.l, v7.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v129.h, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v31, v10
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v128.l, v8.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v128.h, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v103.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v11
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v101.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v117.h, v9.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v118.l, v9.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v31, v12
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v116.l, v10.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v116.h, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v99.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v98.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v13
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v96.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v96.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v11.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v11.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v14
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v112.h, v12.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v113.l, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v86.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v31, v15
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v84.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v84.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v102.l, v13.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v102.h, v13.h
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v16
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v100.h, v14.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.l, v14.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v82.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v17
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v16.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v80.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v80.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v98.h, v15.l
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v99.l, v15.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v31, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v17.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v97.l, v16.l
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v97.h, v16.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v69.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v31, v19
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v17.h
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v18.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v18.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v68.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v67.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v87.l, v17.l
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v87.h, v17.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h
; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l
; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v31, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, 0x300, v19.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v85.l, v18.l
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v85.h, v18.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v65.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v65.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v31, v21
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v19.h
-; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v20.l
-; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v20.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v55.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v50.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v83.l, v19.l
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v83.h, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l
+; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v31, v22
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, 0x300, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v81.h, v20.l
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v82.l, v20.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v31, v23
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v21.h
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v22.l
-; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v48.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v48.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.l, v21.l
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v71.h, v21.h
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v31, v24
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v70.l, v22.l
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v70.h, v22.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v31, v25
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v23.h
-; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v24.l
-; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v38.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v38.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v67.h, v23.l
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v68.l, v23.h
+; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l
+; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v31, v26
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.l, v24.l
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v66.h, v24.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v31, v27
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v25.h
-; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v26.l
-; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v36.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v36.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v64.l, v25.l
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v64.h, v25.h
+; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l
+; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v31, v28
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v54.h, v26.l
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v55.l, v26.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v31, v29
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v27.h
-; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v28.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v34.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v34.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v53.h, v27.l
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v54.l, v27.h
+; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l
+; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v31, v30
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v34.h, 0x300, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v52.h, v28.l
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v53.l, v28.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v31, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v29.h
-; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v30.l
-; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v30.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v32.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v32.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v51.h, v29.l
-; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v52.l, v29.h
+; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l
+; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v31, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v32.h
-; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v50.h, v30.l
-; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v51.l, v30.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v31, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v32.h, 0x300, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v31, v32
+; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l
+; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v150.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v150.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v151.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v151.h, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v146.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v147.l, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.l, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v148.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v134.h, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v144.l, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v144.h, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v130.l, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v130.h, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v131.l, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v131.h, v8.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v132.l, v9.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v117.l, v9.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v117.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v118.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v118.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v119.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v113.l, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v113.h, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v115.l, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v100.h, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v101.l, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.h, v15.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v102.l, v16.l
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v102.h, v16.h
+; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v96.l, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v96.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v97.l, v18.l
+; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v97.h, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v98.l, v19.l
+; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v83.h, v19.h
+; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v84.l, v20.l
+; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v84.h, v20.h
+; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v85.l, v21.l
+; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v85.h, v21.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v70.h, v22.l
+; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v71.l, v22.h
+; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.h, v23.l
+; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v80.l, v23.h
+; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v80.h, v24.l
+; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v66.l, v24.h
+; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.h, v25.l
+; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v67.l, v25.h
+; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v67.h, v26.l
+; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v68.l, v26.h
+; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v53.h, v27.l
+; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v54.l, v27.h
+; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v54.h, v28.l
+; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v55.l, v28.h
+; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v55.h, v29.l
+; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v50.h, v29.h
+; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v51.l, v30.l
+; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v51.h, v30.h
+; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v52.l, v31.l
+; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v52.h, v31.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v10.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, 0x300, v16.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v16.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, 0x300, v17.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v17.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, 0x300, v18.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v18.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, 0x300, v19.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v19.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, 0x300, v20.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v20.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v21.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, 0x300, v21.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, 0x300, v22.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v22.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, 0x300, v23.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, 0x300, v23.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, 0x300, v24.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v24.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v25.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v25.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, 0x300, v26.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v26.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, 0x300, v27.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v27.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, 0x300, v28.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v28.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, 0x300, v29.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v29.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, 0x300, v30.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v30.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v31.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, 0x300, v31.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -78968,271 +78560,205 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v39.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v66.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, 0
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v1.l, v33.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v162.l
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v39.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v161.l
; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v160.l
; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v39, v1
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v2.l, v33.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v65.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v65.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v35.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v39, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v3.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v39.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v36.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v149.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v39, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v64.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v148.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v33.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v35.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v36.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v37.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v146.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v54.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v145.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v39, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v33.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v34.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v35.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v36.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v37.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v39, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v52.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v39, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v39, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v39, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h,
8, v117.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v16.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v49.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v39, v16 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v39, v18 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v39, v20 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v39, v22 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 
v24.h, v24.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v39, v24 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v84.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v39, v26 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v83.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v82.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v81.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v39, v28 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v29.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v80.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v71.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l -; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v39, v29 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v30.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v33.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.h, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v39, v30 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v31.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v33.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v69.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v68.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v39, v31 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v32.l, v33.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v32.h, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v39, v32 +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 
0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v133.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v132.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v52.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v131.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v130.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v9.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v5.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.h, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v129.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v13.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v51.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v118.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v10.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v50.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v16.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v116.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v115.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v49.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v18.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v112.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v13.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v14.l, v14.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v20.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v102.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v101.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v38.l 
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v22.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v100.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v99.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.h, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v15.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v16.h, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v98.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v23.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v37.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v24.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v87.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v19.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v36.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v26.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v86.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v26.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v85.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v84.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v35.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v20.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v21.h, v22.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v28.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v83.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v28.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v82.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v81.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v29.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v34.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v22.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v23.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v24.l, v24.h +; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v30.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v71.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.l, 8, v70.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v32.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.l, 8, v69.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v32.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v68.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v25.h, v26.l +; GFX11-TRUE16-NEXT: 
v_or_b16 v24.h, v26.h, v27.l ; GFX11-TRUE16-NEXT: s_clause 0x5 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[25:28], off offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[29:32], off offset:112 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v16i64_to_v128i8: @@ -88136,61 +87662,61 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v32, off, s32 offset:380 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v31, off, s32 offset:376 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v32, off, s32 offset:372 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:368 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:368 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v33, off, s32 offset:364 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v51, off, s32 offset:360 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v33, off, s32 offset:356 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:352 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:352 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v34, off, s32 offset:348 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v52, off, s32 offset:344 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v34, off, s32 offset:340 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v52, off, s32 offset:336 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v35, off, s32 offset:332 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:328 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:328 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v35, off, s32 offset:324 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:320 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:320 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v36, off, s32 offset:316 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:312 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:312 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v36, off, s32 offset:308 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:304 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, off, s32 offset:304 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v37, off, s32 offset:300 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, off, s32 offset:296 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:296 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v37, off, s32 offset:292 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:288 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:288 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v38, off, s32 offset:284 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:280 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:280 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 
offset:276 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v66, off, s32 offset:272 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v39, off, s32 offset:268 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:264 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:264 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v39, off, s32 offset:260 ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:256 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:256 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v48, off, s32 offset:252 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:248 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:248 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v48, off, s32 offset:244 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:240 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:240 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v49, off, s32 offset:236 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v70, off, s32 offset:232 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v49, off, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:224 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v50, off, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_load_b32 v114, off, s32 offset:388 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 offset:8 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_load_b32 v113, off, s32 offset:388 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:8 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v102, off, s32 offset:104 ; GFX11-TRUE16-NEXT: 
scratch_load_d16_hi_b16 v102, off, s32 offset:112 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v160, off, s32 offset:120 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v160, off, s32 offset:128 @@ -88205,121 +87731,123 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v164, off, s32 offset:192 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v165, off, s32 offset:200 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v165, off, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:148 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v81, off, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v113, off, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 offset:124 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:60 +; 
GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:52 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:20 ; GFX11-TRUE16-NEXT: s_clause 0x1 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v115, off, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v112, off, s32 offset:12 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v115, off, s32 offset:4 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v117.l, v30.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v118.h, v28.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.l, v26.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v24.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v22.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v20.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v18.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.l, v30.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.h, v28.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v26.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v24.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v22.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v20.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v18.l ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v6.l ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.l, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v17.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v145.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v146.h, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v134.h, 8, v19.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.l, 8, v21.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v29.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(54) -; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v114 -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v81.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(16) -; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v129.l, 8, v82.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v129.h, 8, v83.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.l, 8, v83.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.h, 8, v85.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v85.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.l, 8, v87.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.h, 8, v97.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v97.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(9) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v98.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v112.h, 8, v99.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v100.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v102.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.h, 8, v161.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v99.l, 8, v161.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v162.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v162.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.l, 8, v163.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.h, 8, v163.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v164.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v164.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.l, 8, v165.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v165.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v81.h, 8, v71.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v82.l, 8, v71.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v70.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.l, 8, v68.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.h, 8, v67.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v66.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v66.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v64.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v64.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.l, 8, v55.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.h, 8, v54.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v53.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v52.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v52.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v51.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v50.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v31.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.l, 8, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.l, 8, v29.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62) +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v51.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(56) +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v50.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v54.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(26) +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.l, 8, v67.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v66.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v66.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v71.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v71.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(18) +; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v70.h, 8, v70.h +; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v113 +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13) +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.h, 8, v83.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.l, 8, v84.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v84.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v85.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.l, 8, v85.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v96.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v119.l, 8, v97.h +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(8) +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v98.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5) +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.h, 8, v100.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v101.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v101.h +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v115.l, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v102.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.h, 8, v160.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v161.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v161.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.l, 8, v162.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.h, 8, v162.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v163.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v163.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.l, 8, v164.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v164.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.l, 8, v165.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.h, 8, v165.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v80.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.l, 8, v68.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.h, 8, v67.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v55.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v55.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v52.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.h, 8, v52.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v31.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v31.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -88330,215 +87858,179 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB58_4 ; GFX11-TRUE16-NEXT: .LBB58_2: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; GFX11-TRUE16-NEXT: .LBB58_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v149.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v149.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v146.l -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v0.l, v151.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v151.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v0.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v150.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v144.l -; GFX11-TRUE16-NEXT: 
v_or_b32_e32 v0, v149, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v1.h, v150.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v145.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v134.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v133.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v133.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v132.h -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v131.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v148.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v130.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v149, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v2.l, v148.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v2.l, v149.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v134.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v134.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v145.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v130.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v149, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v3.l, v147.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v147.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v119.h -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v118.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v149, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v4.l, v144.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v4.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v133.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v117.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v129.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v129.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v128.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v128.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v116.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v116.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v115.h -; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v115.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v149, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v5.l, v135.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v5.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v132.l -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v129.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v113.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v112.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v149, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v6.l, v133.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v6.l, v149.h -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v128.h -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v103.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v101.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v149, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v7.l, v131.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v7.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v100.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v98.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v149, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v8.l, v129.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v8.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v116.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v114.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v96.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v96.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v149, v8 
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v9.l, v128.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v9.l, v149.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v113.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v86.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v84.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v149, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v10.l, v117.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v10.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v102.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v84.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v82.h +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v112.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v112.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v103.h +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v100.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v99.h +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v99.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v98.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v87.h +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v87.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v86.h +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v83.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v82.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v82.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v81.h ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v81.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v149, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v11.l, v116.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v11.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.l -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v99.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v80.h -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v80.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v149, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v12.l, v114.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v12.l, v149.h -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v97.h -; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v69.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v68.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v149, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v13.l, v112.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v13.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v87.h -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v67.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v65.h -; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v65.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v149, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v14.l, v102.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v14.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v85.h -; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v83.h -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v55.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v70.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v69.h +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v68.h +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v65.h +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v65.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v64.h +; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v64.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v50.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v149, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v15.l, v100.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h ; GFX11-TRUE16-NEXT: v_and_b16 
v22.l, 0xff, v49.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v82.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v49.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v48.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v149, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v16.l, v98.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v16.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.h ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v48.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v39.h ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v39.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v149, v16 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v17.l, v97.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v17.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v70.h -; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v68.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v38.h ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v38.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v149, v17 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v18.l, v87.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.l, v149.h ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v37.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v66.h ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v37.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v36.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v149, v18 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v19.l, v85.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v64.h ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v35.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v149, v19 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v20.l, v83.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v55.l -; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v34.h ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v34.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v149, v20 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v21.l, v81.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.l, v149.h ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v33.l ; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v149, v21 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v22.l, v71.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v22.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v52.l ; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v32.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v150.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v150.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v151.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v151.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v146.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v147.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v147.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v148.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v148.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v134.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v135.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v144.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v144.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v130.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v130.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v131.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v131.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v132.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v117.l +; 
GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v117.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v118.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v119.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v113.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v113.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v114.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v114.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v115.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v100.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v101.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v102.l +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v102.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v17.l, v96.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v96.h +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.l, v97.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v97.h +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v19.l, v98.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v83.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v84.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v84.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v21.l, v85.l +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v85.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v22.l, v70.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.l, v71.h +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v80.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v24.l, v80.h +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v66.l +; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v25.l, v66.h +; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v67.l +; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v26.l, v67.h +; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v68.l +; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v27.l, v53.h +; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v28.l, v54.h +; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v55.l +; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v29.l, v55.h +; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v50.h +; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v30.l, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v51.h +; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v31.l, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v52.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 @@ -88560,433 +88052,329 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr130_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v149, v22 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v23.l, v70.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v23.l, v149.h -; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v51.l -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 -; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v149, v23 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v24.l, v67.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.l, v149.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v149, v24 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v25.l, v66.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.l, v149.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v149, v25 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v26.l, v64.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.l, v149.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v149, v26 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v27.l, v54.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.l, v149.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v149, v27 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v28.l, v53.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.l, v149.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v149, v28 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v29.l, v52.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.l, v149.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v149, v29 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v30.l, v51.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.l, v149.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v149, v30 -; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v31.l, v50.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v149.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v149, v31 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB58_2 ; GFX11-TRUE16-NEXT: .LBB58_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v149.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v149.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v146.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v145.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v133.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v133.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v129.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 
v129.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v128.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v128.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v116.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v116.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v112.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v103.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v99.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v99.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v98.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v87.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v87.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v86.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v83.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, v82.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v82.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v81.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, v70.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, v69.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v68.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, v65.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, v65.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v64.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v64.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, v53.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, v50.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, v48.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, v48.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, v38.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, v38.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, v36.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, v36.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, v34.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, v34.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, v32.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, v32.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v151.l, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v151.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v150.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v150.l, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v144.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v134.h, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 
v0, v31, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v148.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v31, v4 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v31.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v147.h, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v131.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v31, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v130.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v130.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v144.h, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v145.l, v3.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v31, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v119.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v118.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v117.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v133.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v133.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v31, v8 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v131.h, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v132.l, v6.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v115.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v113.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v129.l, v7.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v129.h, 
v7.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v31, v10 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v128.l, v8.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v128.h, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v103.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v101.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v117.h, v9.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v118.l, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v31, v12 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v116.l, v10.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v116.h, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v99.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v98.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v13 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v96.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v96.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v11.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v14 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v112.h, v12.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v113.l, v12.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v86.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v31, v15 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v84.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v84.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v102.l, v13.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v102.h, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: 
v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v16 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v100.h, v14.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.l, v14.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v82.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v17 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v16.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v80.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v80.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v98.h, v15.l -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v99.l, v15.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v31, v18 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v17.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v97.l, v16.l -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v97.h, v16.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v69.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v31, v19 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v17.h -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v68.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v67.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v87.l, v17.l -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v87.h, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l ; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v31, v20 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, 0x300, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v85.l, v18.l -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v85.h, v18.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v65.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v65.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v31, v21 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v19.h -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v55.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v50.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v83.l, v19.l -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v83.h, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v31, v22 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 
v23.h, 0x300, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v81.h, v20.l -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v82.l, v20.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v31, v23 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v48.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v48.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.l, v21.l -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v71.h, v21.h +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v31, v24 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v23.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v70.l, v22.l -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v70.h, v22.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v31, v25 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v23.h -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v38.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v38.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v67.h, v23.l -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v68.l, v23.h +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v31, v26 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v25.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.l, v24.l -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v66.h, v24.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v31, v27 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v25.h -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v36.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v36.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v64.l, v25.l -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v64.h, v25.h +; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v31, v28 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v27.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v54.h, v26.l -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v55.l, v26.h 
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v31, v29 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v27.h -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v34.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v34.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v53.h, v27.l -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v54.l, v27.h +; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l +; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v31, v30 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v34.h, 0x300, v29.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v52.h, v28.l -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v53.l, v28.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v31, v34 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v29.h -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v32.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v32.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v51.h, v29.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v52.l, v29.h +; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v31, v33 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v32.h -; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v50.h, v30.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v51.l, v30.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v31, v33 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v32.h, 0x300, v32.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v31, v32 +; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v150.l, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v150.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v151.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v151.h, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v146.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v147.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.l, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v148.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v134.h, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v144.l, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v144.h, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v130.l, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v130.h, v7.h 
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v131.l, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v131.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v132.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v117.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v117.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v118.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v118.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v119.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v113.l, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v113.h, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v115.l, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v100.h, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v101.l, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.h, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v102.l, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v102.h, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v96.l, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v96.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v97.l, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v97.h, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v98.l, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v83.h, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v84.l, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v84.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v85.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v85.h, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v70.h, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v71.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v80.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v80.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v66.l, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v67.l, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v67.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v68.l, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v53.h, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v54.l, v27.h +; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v54.h, v28.l +; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v55.l, v28.h +; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v55.h, v29.l +; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v50.h, v29.h +; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v51.l, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v51.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v52.l, v31.l +; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v52.h, v31.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 
v10.l, 0x300, v10.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, 0x300, v16.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v16.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, 0x300, v17.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v17.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, 0x300, v18.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v18.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, 0x300, v19.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v19.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, 0x300, v20.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v20.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v21.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, 0x300, v21.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, 0x300, v22.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v22.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, 0x300, v23.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, 0x300, v23.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, 0x300, v24.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v24.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v25.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v25.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, 0x300, v26.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v26.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, 0x300, v27.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v27.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, 0x300, v28.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v28.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, 0x300, v29.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v29.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, 0x300, v30.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v30.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v31.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, 0x300, v31.h ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -113114,271 +112502,205 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v39.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v66.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v1.l, v33.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v162.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v39.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v161.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v39, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v2.l, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v65.l +; GFX11-TRUE16-NEXT: v_or_b16 
v2.l, v2.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v65.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v39, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v3.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v36.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v39, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v145.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v39, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h ; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v39, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v52.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v39, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v39, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v39, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v16.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v49.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v39, v16 
-; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v39, v18 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v39, v20 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v39, v22 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v39, v24 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, 
v27.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v84.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v39, v26 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v83.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v82.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v81.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v39, v28 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v29.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v80.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v71.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l -; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v39, v29 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v30.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v33.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.h, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v39, v30 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v31.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v33.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v69.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v68.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v39, v31 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v32.l, v33.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v32.h, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v39, v32 +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v133.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v132.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v52.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v131.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v130.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v33.h 
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v9.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v5.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.h, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v129.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v13.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v51.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v118.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v10.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v50.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v16.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v116.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v115.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v49.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v18.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v112.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v13.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v14.l, v14.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v20.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v102.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v101.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v22.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v100.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v99.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.h, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v15.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v16.h, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v98.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v23.h +; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v37.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v24.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v87.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v19.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v36.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v26.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v86.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v26.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v85.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v84.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v35.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v20.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v21.h, v22.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v28.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v83.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v28.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v82.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v81.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v29.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v34.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v22.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v23.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v24.l, v24.h +; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v30.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v71.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.l, 8, v70.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v32.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.l, 8, v69.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v32.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v68.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v25.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v26.h, v27.l ; GFX11-TRUE16-NEXT: s_clause 0x5 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[25:28], off offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[29:32], off offset:112 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 +; 
GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v16f64_to_v128i8: @@ -123405,61 +122727,61 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v32, off, s32 offset:380 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v31, off, s32 offset:376 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v32, off, s32 offset:372 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:368 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:368 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v33, off, s32 offset:364 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v51, off, s32 offset:360 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v33, off, s32 offset:356 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v51, off, s32 offset:352 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v50, off, s32 offset:352 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v34, off, s32 offset:348 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v52, off, s32 offset:344 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v34, off, s32 offset:340 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v52, off, s32 offset:336 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v35, off, s32 offset:332 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:328 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:328 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v35, off, s32 offset:324 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:320 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:320 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v36, off, s32 offset:316 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v54, off, s32 offset:312 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v53, off, s32 offset:312 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v36, off, s32 offset:308 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v54, off, s32 offset:304 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, off, s32 offset:304 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v37, off, s32 offset:300 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v55, off, s32 offset:296 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:296 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v37, off, s32 offset:292 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:288 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:288 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v38, off, s32 offset:284 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:280 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:280 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 offset:276 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v66, off, s32 offset:272 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v39, off, s32 offset:268 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v66, off, s32 offset:264 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:264 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v39, off, s32 offset:260 ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v67, off, s32 offset:256 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:256 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v48, off, s32 offset:252 -; 
GFX11-TRUE16-NEXT: scratch_load_d16_b16 v68, off, s32 offset:248 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:248 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v48, off, s32 offset:244 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:240 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:240 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v49, off, s32 offset:236 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v70, off, s32 offset:232 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v49, off, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v71, off, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:224 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v50, off, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v71, off, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_load_b32 v114, off, s32 offset:388 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 offset:8 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_load_b32 v113, off, s32 offset:388 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v83, off, s32 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:8 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v85, off, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v85, off, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v97, off, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v97, off, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v100, off, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v101, off, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v102, off, s32 offset:104 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v102, off, s32 offset:112 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v160, off, s32 offset:120 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v160, off, s32 offset:128 @@ -123474,121 +122796,123 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v164, off, s32 offset:192 ; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v165, off, s32 offset:200 ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v165, off, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v55, off, s32 offset:212 -; GFX11-TRUE16-NEXT: 
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:204
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:196
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v67, off, s32 offset:188
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:180
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:172
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:164
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v80, off, s32 offset:156
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v80, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v53, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v64, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v64, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v65, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v65, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v68, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v69, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v69, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v70, off, s32 offset:148
; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v81, off, s32 offset:140
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:132
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v84, off, s32 offset:124
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v84, off, s32 offset:116
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:108
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:100
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v96, off, s32 offset:92
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v96, off, s32 offset:84
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v98, off, s32 offset:76
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:68
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:60
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v101, off, s32 offset:52
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:44
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:36
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:28
-; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v113, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v81, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v82, off, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v82, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v83, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v86, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v86, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v87, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v87, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v98, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v99, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v99, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v100, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v103, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v103, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v112, off, s32 offset:20
; GFX11-TRUE16-NEXT: s_clause 0x1
-; GFX11-TRUE16-NEXT: scratch_load_d16_b16 v115, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v112, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v115, off, s32 offset:4
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v117.l, v30.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v118.h, v28.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.l, v26.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v24.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v22.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v20.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v18.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.l, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v116.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v119.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v18.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v132.h, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v146.l, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v17.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v145.l, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v150.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v151.h, 8, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v146.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.l, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v147.h, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.l, 8, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v148.h, 8, v17.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v134.h, 8, v19.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.l, 8, v21.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v135.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.l, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v133.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v29.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(54)
-; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v114
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(17)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v81.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(16)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v129.l, 8, v82.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v129.h, 8, v83.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.l, 8, v83.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v128.h, 8, v85.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v85.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.l, 8, v87.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v116.h, 8, v97.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v97.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(9)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v98.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(3)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v112.h, 8, v99.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v100.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v102.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.h, 8, v161.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v99.l, 8, v161.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v162.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v162.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.l, 8, v163.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v87.h, 8, v163.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v164.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v164.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.l, 8, v165.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v165.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v81.h, 8, v71.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v82.l, 8, v71.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v70.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v70.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.l, 8, v68.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.h, 8, v67.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v66.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v64.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.l, 8, v55.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v64.h, 8, v54.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v53.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v52.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v52.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v51.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v50.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v31.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v31.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.l, 8, v25.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v144.h, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.l, 8, v29.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v51.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v51.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(56)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v50.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v54.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v54.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(26)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.h, 8, v53.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.l, 8, v67.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.h, 8, v66.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v66.l, 8, v66.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.h, 8, v71.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v71.l, 8, v71.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(18)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v70.h, 8, v70.h
+; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v113
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v130.h, 8, v83.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.l, 8, v84.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v131.h, 8, v84.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v132.l, 8, v85.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.l, 8, v85.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v117.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.l, 8, v96.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v118.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v119.l, 8, v97.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(8)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.l, 8, v98.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v113.h, 8, v100.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.l, 8, v101.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v114.h, 8, v101.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v115.l, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v100.h, 8, v102.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.l, 8, v160.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v101.h, 8, v160.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.l, 8, v161.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v102.h, 8, v161.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.l, 8, v162.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v96.h, 8, v162.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.l, 8, v163.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v97.h, 8, v163.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v98.l, 8, v164.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v83.h, 8, v164.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.l, 8, v165.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v84.h, 8, v165.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.l, 8, v80.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v85.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.l, 8, v68.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v80.h, 8, v67.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v67.h, 8, v55.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v68.l, 8, v55.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v52.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.h, 8, v52.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v31.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v31.l
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -123599,215 +122923,179 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB74_4
; GFX11-TRUE16-NEXT: .LBB74_2: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB74_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v149.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v146.l
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v0.l, v151.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v151.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v0.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v150.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v144.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v149, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v1.h, v150.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v146.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v145.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v145.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v134.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v133.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v133.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v132.h
-; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v131.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v148.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v130.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v149, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v2.l, v148.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v2.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v134.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v134.l
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v145.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v130.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v149, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v3.l, v147.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v147.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v3.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v119.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v118.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v149, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v4.l, v144.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v4.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v133.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v117.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v129.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v129.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v128.h
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v119.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v116.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v116.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v115.h
-; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v115.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v149, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v5.l, v135.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v5.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v132.l
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v129.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v113.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v112.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v149, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v6.l, v133.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v6.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v128.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v103.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v101.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v149, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v7.l, v131.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v7.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v100.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v98.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v149, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v8.l, v129.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v8.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v116.h
-; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v114.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v96.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v96.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v149, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v9.l, v128.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v9.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v113.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v86.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v84.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v149, v9
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v10.l, v117.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v10.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v102.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v84.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v112.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v112.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v103.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v103.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v100.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v99.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v99.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v98.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v87.h
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v87.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v86.h
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v86.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v83.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v82.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v81.h
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v81.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v149, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v11.l, v116.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v11.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.l
-; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v99.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v80.h
-; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v80.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v149, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v12.l, v114.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v12.l, v149.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v97.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v69.l
-; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v68.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v149, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v13.l, v112.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v13.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v87.h
-; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v67.l
-; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v65.h
-; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v65.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v149, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v14.l, v102.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v14.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v85.h
-; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v83.h
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v55.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v70.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v69.h
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v69.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v68.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v65.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v65.l
+; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v64.h
+; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v64.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v50.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v149, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v15.l, v100.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v49.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v82.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v49.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v48.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v149, v15
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v16.l, v98.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v16.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.h
; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v48.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v39.h
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v39.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v149, v16
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v17.l, v97.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v17.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v70.h
-; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v68.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v38.h
; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v38.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v149, v17
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v18.l, v87.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v18.l, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v37.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v66.h
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v37.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v36.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v149, v18
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v19.l, v85.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v19.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v64.h
; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v35.h
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v35.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v149, v19
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v20.l, v83.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v20.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v55.l
-; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v54.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v34.h
; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v34.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v149, v20
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v21.l, v81.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v21.l, v149.h
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v33.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v33.l
; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v149, v21
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v22.l, v71.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v22.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v52.l
; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v32.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v150.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v150.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v151.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v151.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v146.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v147.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v147.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v148.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v148.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v134.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v135.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v135.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v144.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v144.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v130.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v130.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v131.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v131.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v132.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v117.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v117.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v118.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v118.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v119.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v113.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v113.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v114.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v114.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v115.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v100.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v101.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v101.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v102.l
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v102.h
+; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v17.l, v96.l
+; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v96.h
+; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.l, v97.l
+; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v97.h
+; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v19.l, v98.l
+; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v83.h
+; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v84.l
+; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v84.h
+; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v21.l, v85.l
+; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v85.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v22.l, v70.h
+; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v71.l
+; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.l, v71.h
+; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v80.l
+; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v24.l, v80.h
+; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v66.l
+; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v25.l, v66.h
+; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v67.l
+; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v26.l, v67.h
+; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v68.l
+; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v27.l, v53.h
+; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v54.l
+; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v28.l, v54.h
+; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v55.l
+; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v29.l, v55.h
+; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v50.h
+; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v30.l, v51.l
+; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v51.h
+; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v31.l, v52.l
+; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v52.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16
@@ -123829,433 +123117,329 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v149, v22
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v23.l, v70.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v23.l, v149.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v51.l
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v149, v23
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v24.l, v67.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v24.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v149, v24
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v25.l, v66.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v25.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v149, v25
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v26.l, v64.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v26.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v149, v26
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v27.l, v54.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v27.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v149, v27
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v28.l, v53.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v28.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v149, v28
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v29.l, v52.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v29.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v149, v29
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v30.l, v51.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.l, v149.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v149, v30
-; GFX11-TRUE16-NEXT: v_or_b16 v149.l, v31.l, v50.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v149.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v149, v31
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB74_2
; GFX11-TRUE16-NEXT: .LBB74_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v149.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v149.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v146.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v146.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v145.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v133.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v133.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v129.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v129.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v128.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v128.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v116.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v116.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v112.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v103.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v99.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v99.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v98.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v87.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v87.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v86.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v83.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, v82.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v82.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v81.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, v70.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, v69.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v68.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, v65.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, v65.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v64.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v64.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, v53.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, v50.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, v48.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, v48.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, v38.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, v38.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, v36.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, v36.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, v34.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, v34.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, v32.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, v32.l, 3
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v151.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v151.h, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v150.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v150.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v145.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v144.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v134.h, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v31, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v134.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v148.l, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.h, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v31, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v31.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v147.h, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v132.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v131.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v31, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v130.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v130.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v144.h, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v145.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v31, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v119.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v119.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v118.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v117.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v133.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v133.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v31, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v131.h, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v132.l, v6.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v115.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v115.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v113.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v112.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v129.l, v7.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v129.h, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v31, v10
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v128.l, v8.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v128.h, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v103.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v103.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v11
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v101.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v100.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v117.h, v9.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v118.l, v9.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v31, v12
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v116.l, v10.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v116.h, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v99.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v98.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v13
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v96.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v96.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v11.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v11.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v14
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v112.h, v12.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v113.l, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v86.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v86.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v31, v15
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v84.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v84.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v102.l, v13.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v102.h, v13.h
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v16
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v100.h, v14.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.l, v14.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v82.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v81.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v17
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v16.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, v80.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, v80.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v98.h, v15.l
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v99.l, v15.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v31, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v17.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v97.l, v16.l
-; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v97.h, v16.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v69.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v69.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v31, v19
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v17.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v17.h
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v18.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v18.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, v68.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, v67.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v87.l, v17.l
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v87.h, v17.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h
; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l
; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v31, v20
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, 0x300, v19.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v85.l, v18.l
-; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v85.h, v18.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v65.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v65.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v31, v21
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v19.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v19.h
-; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v20.l
-; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v20.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, v55.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, v50.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v83.l, v19.l
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v83.h, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l
+; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v31, v22
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, 0x300, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v81.h, v20.l
-; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v82.l, v20.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v49.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v49.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v31, v23
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v21.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v21.h
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v22.l
-; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, v48.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, v48.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.l, v21.l
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v71.h, v21.h
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v31, v24
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v70.l, v22.l
-; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v70.h, v22.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v39.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v39.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v31, v25
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v23.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v23.h
-; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v24.l
-; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, v38.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, v38.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v67.h, v23.l
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v68.l, v23.h
+; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l
+; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v31, v26
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.l, v24.l
-; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v66.h, v24.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v37.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v37.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v31, v27
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v25.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v25.h
-; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v26.l
-; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, v36.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, v36.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v64.l, v25.l
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v64.h, v25.h
+; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l
+; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v31, v28
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v54.h, v26.l
-; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v55.l, v26.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v35.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v35.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v31, v29
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v27.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v27.h
-; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v28.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, v34.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, v34.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v53.h, v27.l
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v54.l, v27.h
+; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l
+; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v31, v30
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v34.h, 0x300, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v52.h, v28.l
-; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v53.l, v28.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v33.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v33.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v31, v34
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v29.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v29.h
-; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v30.l
-; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v30.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, v32.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, v32.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v51.h, v29.l
-; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v52.l, v29.h
+; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l
+; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v31, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v33.h, 0x300, v32.h
-; GFX11-TRUE16-NEXT: v_or_b16 v32.l, v50.h, v30.l
-; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v51.l, v30.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v31, v33
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v32.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v32.h, 0x300, v32.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v31.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v31, v32
+; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l
+; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v150.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v150.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v151.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v151.h, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v146.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v147.l, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v147.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v148.l, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v148.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v134.h, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v135.l, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v135.h, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v144.l, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v144.h, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v130.l, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v130.h, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v131.l, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v131.h, v8.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v132.l, v9.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v117.l, v9.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v117.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v118.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v118.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v119.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v113.l, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v113.h, v12.h
GFX11-TRUE16-NEXT: v_or_b16 v13.l, v114.l, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v114.h, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v115.l, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v100.h, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v101.l, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v101.h, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v102.l, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v102.h, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v96.l, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v96.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v97.l, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v97.h, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v98.l, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v83.h, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v84.l, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v84.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v85.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v85.h, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v70.h, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v71.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v71.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v80.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v80.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v66.l, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v25.l, v66.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v67.l, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v26.l, v67.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v68.l, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v53.h, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v54.l, v27.h +; GFX11-TRUE16-NEXT: v_or_b16 v28.l, v54.h, v28.l +; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v55.l, v28.h +; GFX11-TRUE16-NEXT: v_or_b16 v29.l, v55.h, v29.l +; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v50.h, v29.h +; GFX11-TRUE16-NEXT: v_or_b16 v30.l, v51.l, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v51.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v52.l, v31.l +; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v52.h, v31.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v10.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h 
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.l, 0x300, v16.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v16.h, 0x300, v16.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.l, 0x300, v17.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v17.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.l, 0x300, v18.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v18.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.l, 0x300, v19.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v19.h, 0x300, v19.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.l, 0x300, v20.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v20.h, 0x300, v20.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v21.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.h, 0x300, v21.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.l, 0x300, v22.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v22.h, 0x300, v22.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.l, 0x300, v23.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v23.h, 0x300, v23.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.l, 0x300, v24.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v24.h, 0x300, v24.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v25.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.h, 0x300, v25.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.l, 0x300, v26.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v26.h, 0x300, v26.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.l, 0x300, v27.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v27.h, 0x300, v27.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.l, 0x300, v28.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v28.h, 0x300, v28.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.l, 0x300, v29.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v29.h, 0x300, v29.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.l, 0x300, v30.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v30.h, 0x300, v30.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.l, 0x300, v31.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v31.h, 0x300, v31.h ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -161654,179 +160838,182 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:236 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:232 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:176 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:168 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:160 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:156 
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:152 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:148 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:144 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:136 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:128 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:120 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:112 -; GFX11-TRUE16-NEXT: s_clause 0x18 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:104 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:20 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:248 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:244 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:240 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:236 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:232 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:228 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:220 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:208 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:200 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:192 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:184 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:180 +; 
GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:176 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:168 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:160 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:152 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:144 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:140 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:136 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:128 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:124 +; GFX11-TRUE16-NEXT: s_clause 0x1b +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:120 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:112 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:60 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:52 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:12 ; GFX11-TRUE16-NEXT: s_clause 0x2 ; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8 -; GFX11-TRUE16-NEXT: scratch_load_b32 v99, off, s32 offset:4 -; GFX11-TRUE16-NEXT: scratch_load_b32 v98, off, s32 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16 +; GFX11-TRUE16-NEXT: scratch_load_b32 v81, off, s32 offset:4 +; GFX11-TRUE16-NEXT: scratch_load_b32 v80, off, s32 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr141_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr140_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr139_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr182_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr41_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr123_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr107_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr95_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr92_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr93_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr75_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr99_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr139_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr61_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr155_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr154_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr122_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr122_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr92_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr95_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr93_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr85_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr41_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr182_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2) ; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v31 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16 @@ -161835,136 +161022,136 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_2 ; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[15:16] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[13:14] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[15:16] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[99:100], 24, v[13:14] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[11:12] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[7:8] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[3:4] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, 
v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v13 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v5 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 24, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 8, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v2 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v1 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v99 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v99 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 24, v81 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v81 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v98 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v17 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[5:6] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v80 +; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v17 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[5:6] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[1:2] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[98:99] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[29:30] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[21:22] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v1.l +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[82:83], 24, v[80:81] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[29:30] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[27:28] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[97:98], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[23:24] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[19:20] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v181.h, v1.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v2.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v40.h, v3.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v3.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v182.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v56.h, v5.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v41.h, v5.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v42.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v40.h, v6.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v6.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v79.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v60.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v106.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v76.h, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v10.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v127.h, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v104.h, v12.l -; GFX11-TRUE16-NEXT: 
v_mov_b16_e32 v113.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v142.h, v13.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v143.h, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v141.h, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v16.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v17.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v72.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v70.h, v7.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v46.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v71.h, v8.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v104.h, v9.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v84.h, v9.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v74.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v85.h, v10.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v136.h, v11.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v102.h, v11.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v106.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.h, v12.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v153.h, v13.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v13.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v139.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.h, v14.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v155.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.h, v15.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v154.h, v16.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v16.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v17.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v17.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v18.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v19.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v19.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.h, v20.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v20.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v20.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v21.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v21.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v22.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v22.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v22.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v23.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v23.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v23.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v24.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v24.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v24.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v25.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v25.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v25.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v26.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v26.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v26.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v27.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v27.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v27.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v28.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v28.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v28.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v29.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v29.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v29.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v30.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v30.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v30.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v98.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v98.h -; 
GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v99.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v99.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v80.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v80.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v81.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v81.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5 @@ -161980,7 +161167,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81 ; GFX11-TRUE16-NEXT: .LBB90_2: ; %Flow ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_4 @@ -162019,10 +161206,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v17, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 24, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v32 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v135, v37, v49, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v149, v37, v49, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v19 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 @@ -162036,97 +161223,101 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v18 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v135.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v149.h ; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v36, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v147, v33, v35, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v33, v35, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v19 ; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v31 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[31:32] ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v17, v34, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v19, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v147.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v31 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v19, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v34 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v34 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v17, v33, vcc_lo +; GFX11-TRUE16-NEXT: 
v_dual_cndmask_b32 v151, v17, v33 :: v_dual_and_b32 v18, 0xffff0000, v22 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v18, 0xffff0000, v22 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_cndmask_b32 v33, v20, v35 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v21 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v22, 0x40c00000, v22 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v20, 0x40c00000, v20 :: v_dual_add_f32 v21, 0x40c00000, v21 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v150.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v151.h ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v22 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 24, v34 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v34 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v149, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v33 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v160, v19, v35, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v24 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v21 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v17, v36, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v149.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v23 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v24, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v161, v17, v24, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v23 ; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v36 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v160.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v19, v35, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v148.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v36 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v161.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v26 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v33 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v151, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v35 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v17, v24, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v151.h ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v23 :: v_dual_lshlrev_b32 v21, 16, v25 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 24, v36 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v36 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -162139,8 +161330,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v161, v19, v23 :: v_dual_lshlrev_b32 v22, 16, v28 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 
v22, 16, v28 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v163.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 @@ -162153,10 +161346,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v27 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v161.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v150.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v37 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -162169,10 +161361,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v49 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v49 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v35 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v162.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v165.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 @@ -162185,10 +161377,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v29 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v163.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v38 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v38 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[37:38] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 24, v38 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v38 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -162201,14 +161393,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v99 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v51 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v51 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v81 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v167.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v19, v23, vcc_lo ; 
GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v99 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v81 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v53, v17, v24 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 @@ -162217,14 +161409,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v98 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v160.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v165.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v80 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v164.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v98 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v80 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_cndmask_b32 v52, v19, v24 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 @@ -162233,10 +161425,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v53 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v53 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v37 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v177.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v49 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v49 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v2 @@ -162249,10 +161441,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v167.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v22, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v17, v22, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v1 @@ -162263,13 +161454,12 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff ; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 
0x40c00000, v1 :: v_dual_add_f32 v20, 0x40c00000, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v2, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v55 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v55 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v179.h ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v20, 16, 1 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v21, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v180, v19, v21, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v4 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 @@ -162282,11 +161472,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v1, 0x7fff -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v162.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v177.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 8, v48 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v19, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v181, v17, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v3 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 @@ -162301,9 +161490,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v65 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v65 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v2, v19, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v166.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v181.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v182, v2, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v17, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v3 @@ -162313,13 +161502,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v17, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v179.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v51 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v164.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v67 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v40, v1, v18, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v176.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v51 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v183, v1, v18, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 
0x400000, v6 @@ -162330,13 +161519,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[50:51] +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v66.l, v183.h ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[48:49] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[37:38] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v67 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v42, v2, v17, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v53 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v53 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v50 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v40, v2, v17, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v8 @@ -162350,28 +161539,27 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v50 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v56, v2, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v41, v2, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v5, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v48 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v1, v17, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v2, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v7 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v10 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v56.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v41.h ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v60, v3, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v46, v3, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v10 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_cndmask_b32 v83, v1, v8 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_cndmask_b32 v71, v1, v8 ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 @@ -162380,29 +161568,29 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v60.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v166.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v1, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v71.l, v46.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[52:53] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v70, v1, v7, 
vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v9 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v79, v4, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v72, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v12 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v79.h -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v76, v1, v4 :: v_dual_lshlrev_b32 v1, 16, v9 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v178.h +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v74, v1, v4 :: v_dual_lshlrev_b32 v1, 16, v9 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v2, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v85, v2, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 @@ -162410,40 +161598,40 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.l, v76.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v176.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[82:83] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v2, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v182.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v85.l, v74.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v70.l, v72.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v84, v2, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[54:55] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[70:71] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[82:83], 24, v[54:55] ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v106, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v104, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v7, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.l, v106.h -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v104, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v84.l, v104.h +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v106, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v14 -; GFX11-TRUE16-NEXT: 
v_lshrrev_b64 v[117:118], 24, v[96:97] +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v180.h ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v3, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v3, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.l, v104.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v112, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.l, v106.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[84:85] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v102, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v4 :: v_dual_add_f32 v3, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1 @@ -162452,8 +161640,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v178.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v127, v4, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[33:34] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v136, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 @@ -162461,19 +161649,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v129, v4, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v131, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v125, v6, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v139, v6, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v40.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v128, v3, v4, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v139.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v130, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 @@ -162481,11 +161669,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v125.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.l, v127.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v102.l, v136.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v40.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5 ; 
GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v142, v4, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v153, v4, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 @@ -162494,389 +161682,322 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v142.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v141, v2, v9, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v153.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v154, v2, v9, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v42.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[112:113] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v143, v7, v11, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[99:100], 24, v[130:131] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[102:103] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v155, v7, v11, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[66:67] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[68:69] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[68:69] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[64:65] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v134, v4, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v141.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[33:34] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v129 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v129 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v133, v2, v3, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v143.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v134 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v134 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v128 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v113 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[133:134] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[128:129] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[35:36] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v133 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v113 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v112 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v97 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v97 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v96 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v83 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v83 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v82 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v68 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v66 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.l, v154.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[50:51] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[35:36] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v131 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v147, v2, v3, vcc_lo +; 
GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.l, v155.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v148 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v148 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v131 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v130 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[147:148] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[97:98], 24, v[48:49] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v147 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v103 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v103 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v102 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 24, v85 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 8, v85 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v84 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v71 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v71 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v70 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 24, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 8, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v68 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 24, v67 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 8, v67 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v66 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v65 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v65 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v64 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v54 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v52 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 24, v55 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v55 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v54 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v52 ; GFX11-TRUE16-NEXT: .LBB90_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v178.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v181.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v152.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v64.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v145.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v65.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.l, v1.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v139.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v180.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v143.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v65.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v141.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v183.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v140.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v66.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v1.h ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v177.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v140.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v66.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v144.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v1 -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v67.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v2.l, v2.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.l, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v40.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v138.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v136.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v3.l, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v4.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v5.h +; 
GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v182.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v127.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v67.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v125.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v41.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v123.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v131.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v3.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.l, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v179.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v137.l -; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v69.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v40.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v121.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v5, v3 -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v82.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v4.l, v4.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.l, v6.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v56.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v126.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v130.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v83.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v107.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v5.l, v5.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v6.l, v6.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v42.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v123.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v96.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v117.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v5, v6 -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v97.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v7.l, v7.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.l, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v79.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v111.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v91.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v112.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v7 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v114.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v8.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v60.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v109.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v113.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v75.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v5, v8 -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v128.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v9.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v106.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v95.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v129.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v5, v9 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v61.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v7.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v8.l, v8.h +; GFX11-TRUE16-NEXT: 
v_and_b16 v6.h, 0xff, v69.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v111.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v72.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v109.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v70.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v128.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v46.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v107.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v71.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v105.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v9.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v10.h, v11.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v104.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v92.l +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v85.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v89.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v136.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v79.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v102.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v114.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v106.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v77.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v103.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v75.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v84.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v74.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v91.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v5.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.h, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v153.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v62.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v130.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v139.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v61.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v131.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v59.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v155.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v57.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v10.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v11.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v147.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v96.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v154.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v47.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v148.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v44.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v149.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v142.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v132.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, 
v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v39.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v138.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v32.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v137.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v151.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v126.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v118.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v150.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v124.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v76.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v93.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v133.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v100.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v5, v10 -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v134.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v127.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v89.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v45.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v5, v11 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.l, v12.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v13.l, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v104.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v78.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v120.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v5, v12 -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v13.l, v13.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v142.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v73.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v34.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v5, v13 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v105.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v14.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v125.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v63.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v35.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v102.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v5, v14 -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v36.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v13.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v14.l, v14.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v34.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v122.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v161.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v120.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v35.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v115.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v160.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v110.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v36.h +; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v108.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.h, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v15.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v16.h, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v163.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v95.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v37.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v112.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v162.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v94.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v38.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v93.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v165.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v90.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v143.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v58.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v90.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v37.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v5, v15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v86.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v16.l, v16.h -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v17.l, v17.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v141.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v47.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v38.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v74.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v5, v16 -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v48.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v17.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v18.l, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v135.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v124.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v85.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v49.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v5, v17 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v59.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.l, v18.h -; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.l, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v122.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v50.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v84.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v5, v18 -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v51.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v19.l, v19.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v20.l, v20.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v148.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v110.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v44.l -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v52.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v5, v19 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v80.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v20.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v19.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v48.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v97.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v164.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v88.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 
0xff, v49.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v78.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v167.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v76.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v50.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v100.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v20.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v21.h, v22.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v166.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v73.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v51.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v63.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v177.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v60.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v52.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v176.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v58.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v20.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v147.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v108.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v53.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v183.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v5, v20 -; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v54.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v22.l, v22.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v150.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v94.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_and_b16 v34.l, 0xff, v55.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v5, v21 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v180.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v22.l, v22.h -; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v23.l, v23.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v149.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v92.l -; GFX11-TRUE16-NEXT: s_clause 0x1 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v5, v22 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v23.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.l, v24.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v160.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v88.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v5, v23 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v24.l, v24.h -; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v25.l, v25.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v151.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v77.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v5, v24 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v25.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v26.l, v26.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v162.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v72.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) 
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v5, v25 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v26.l, v26.h -; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v27.l, v27.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v161.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v62.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v5, v26 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v27.l, v27.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v28.l, v28.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v164.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v57.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v5, v27 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v28.l, v28.h -; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v29.l, v29.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v163.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v46.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v5, v28 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.l, v29.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v30.l, v30.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v166.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v43.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v5, v29 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v30.l, v30.h -; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v31.l, v31.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v165.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v41.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v5, v30 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v31.l, v31.h -; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v32.l, v32.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v176.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v182.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v5, v31 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v32.l, v32.h -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v33.l, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v167.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v181.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v5, v32 -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v33.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v33.h, v34.l, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v5.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, v5, v33 +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v22.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v23.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v24.l, v24.h +; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v53.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v56.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v179.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.l, 8, v45.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v54.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v82.l +; GFX11-TRUE16-NEXT: v_and_b16 
v25.h, 0xff, v178.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.l, 8, v43.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v55.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v42.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v25.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v26.h, v27.l ; GFX11-TRUE16-NEXT: s_clause 0x5 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[10:13], off offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[14:17], off offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[18:21], off offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[22:25], off offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[26:29], off offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[30:33], off offset:112 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112 ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:12 -; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:20 -; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:104 -; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:112 -; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:120 -; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:128 -; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:136 -; GFX11-TRUE16-NEXT: s_clause 0x18 -; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:144 -; 
GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:148 -; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:152 -; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:160 -; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:168 -; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:176 -; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:232 -; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:236 +; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:52 +; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:60 +; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:112 +; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:120 +; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:124 +; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:128 +; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:136 +; GFX11-TRUE16-NEXT: s_clause 
0x1b +; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:140 +; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:144 +; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:152 +; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:160 +; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:168 +; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:176 +; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:184 +; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:192 +; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:200 +; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:208 +; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:220 +; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:228 +; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:232 +; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:236 +; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:240 +; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:244 +; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:248 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -186713,69 +185834,69 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_b32 v33, off, s32 offset:8 ; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4 ; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 +; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 -; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2) ; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v33 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_lo16 @@ -186784,95 +185905,91 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB94_2 ; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[15:16] ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[13:14] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[11:12] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[66:67], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[5:6] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[29:30] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[68:69], 24, v[5:6] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[53:54], 24, v[11:12] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[37:38], 24, v[27:28] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v7 +; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 24, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 8, v1 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 24, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 8, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 8, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 24, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v31 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 
v[54:55], 24, v[9:10] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[3:4] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[1:2] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[23:24] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[52:53], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[19:20] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 8, v17 ; GFX11-TRUE16-NEXT: .LBB94_2: ; %Flow ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB94_4 ; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true +; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) ; GFX11-TRUE16-NEXT: v_pk_add_f16 v32, 0x200, v32 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: v_pk_add_f16 v31, 0x200, v31 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, v6 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v28, 0x200, v28 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v27, 0x200, v27 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, v12 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, v14 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, v13 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1] @@ -186883,345 +186000,283 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_pk_add_f16 v23, 0x200, v23 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v26, 0x200, v26 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v25, 0x200, v25 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v30, 0x200, v30 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v29, 0x200, v29 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, v2 op_sel_hi:[0,1] ; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, v1 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, v15 op_sel_hi:[0,1] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, v10 op_sel_hi:[0,1] +; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1] +; 
GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[15:16] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[13:14] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[11:12] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[66:67], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[5:6] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[68:69], 24, v[5:6] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[37:38], 24, v[27:28] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[3:4] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[29:30] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[53:54], 24, v[11:12] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[23:24] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[52:53], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[19:20] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 24, v6 +; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 24, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 8, v1 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 24, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 8, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 8, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 24, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v31 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 8, v17 ; GFX11-TRUE16-NEXT: .LBB94_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v166.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v176.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, 0 
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v80.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v1.l, v33.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v165.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v39.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v164.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v167.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v166.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v165.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v39, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v2.l, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v70.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v163.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v69.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v39, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v3.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v36.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v162.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v161.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v150.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v39, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v69.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v66.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v39, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v39, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v39, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v39, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l -; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v34.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v39, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v16.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v65.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v39, v16 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v39, v18 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v39, v20 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v39, v22 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v39, v24 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v39, v26 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v39, v28 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v29.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v84.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v83.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l -; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v39, v29 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v30.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v82.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v33.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.h, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v39, v30 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v31.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v33.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v81.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v80.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: 
scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v39, v31 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v32.l, v33.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v32.h, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v39, v32 +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v118.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v116.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v51.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v114.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v112.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v9.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v5.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.h, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v101.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v13.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v98.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v10.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v36.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v86.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v16.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v84.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v35.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v67.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v18.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v164.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v163.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v161.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v64.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 
v11.h, v13.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v14.l, v14.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v20.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v149.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v147.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v52.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v22.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v145.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v135.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.h, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v15.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v16.h, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v132.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v23.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v24.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v19.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v26.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v26.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v115.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v113.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v20.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v21.h, v22.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v28.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v28.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v29.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v34.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v22.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v23.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v24.l, v24.h +; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v30.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v87.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.l, 8, v85.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 
0xff, v32.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.l, 8, v83.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v32.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v82.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v25.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v26.h, v27.l ; GFX11-TRUE16-NEXT: s_clause 0x5 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[25:28], off offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[29:32], off offset:112 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v64f16_to_v128i8: @@ -209415,69 +208470,69 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_b32 v33, off, s32 offset:8 ; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4 ; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_lo16 -; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(2) ; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v33 ; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr33_lo16 @@ -209486,95 +208541,91 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB98_2 ; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[15:16] ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[13:14] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[11:12] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[66:67], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[5:6] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[29:30] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[68:69], 24, v[5:6] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[53:54], 24, v[11:12] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[37:38], 24, v[27:28] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 24, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 8, v1 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v32 -; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 24, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 8, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 8, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 24, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v31 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[3:4] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[1:2] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[23:24] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[52:53], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[19:20] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 8, v17 ; GFX11-TRUE16-NEXT: .LBB98_2: ; %Flow 
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB98_4 ; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true +; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) ; GFX11-TRUE16-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: v_pk_add_u16 v31, v31, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v28, v28, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v27, v27, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, v12, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0] @@ -209585,345 +208636,283 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_pk_add_u16 v23, v23, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v26, v26, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v25, v25, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v30, v30, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v29, v29, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0] ; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[15:16] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[13:14] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[11:12] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[66:67], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[5:6] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[68:69], 24, v[5:6] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[37:38], 24, v[27:28] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[3:4] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[29:30] -; 
GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[53:54], 24, v[11:12] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[23:24] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[52:53], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[19:20] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[67:68], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 24, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 8, v1 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v85, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v165, 8, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v166, 24, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v167, 8, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v176, 8, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 24, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v31 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v164, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v35, 8, v17 ; GFX11-TRUE16-NEXT: .LBB98_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v166.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v176.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, 0 +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v80.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v1.l, v33.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v165.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v39.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v164.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v167.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v34.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v166.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, 
v165.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v39, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v2.l, v33.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v70.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v163.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v69.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v39, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v3.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v36.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v162.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v161.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v150.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v39, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v69.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v39, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v66.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v36.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v39, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v37.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v36.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v37.h ; 
GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v39, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v39, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v39, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l -; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v39, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v16.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v34.h -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v65.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v18.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v39, v16 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l -; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v39, v18 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v39, v20 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, v39, v22 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 
v39, v24 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l -; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v39, v26 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v39, v28 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v29.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v29.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v84.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v83.l -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v31.l -; GFX11-TRUE16-NEXT: v_and_b16 v31.h, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v39, v29 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v30.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v30.h, v34.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v82.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v33.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v32.h, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v39, v30 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v31.l, v33.h -; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v31.h, v33.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v81.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v80.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v39, v31 -; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v32.l, v33.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v32.h, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v39, v32 +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v118.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v116.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v51.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, 
v12.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v114.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v112.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v35.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v9.h, v34.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v5.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.h, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v101.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v13.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v98.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v10.l, v35.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v36.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v86.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v16.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v84.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v35.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v67.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v18.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v164.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v163.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v161.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v64.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v13.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v14.l, v14.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v20.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v149.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v147.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v52.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v22.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v145.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v135.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.h, v14.l +; 
GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v15.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v16.h, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v132.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v23.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v24.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v17.l, v19.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v26.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v26.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v115.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v113.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.l, v18.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.l, v20.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v21.h, v22.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v28.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v28.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v29.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v34.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_or_b16 v20.l, v20.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v22.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v23.l, v23.h +; GFX11-TRUE16-NEXT: v_or_b16 v22.l, v24.l, v24.h +; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v30.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v87.l +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.l, 8, v85.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v31.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v32.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.l, 8, v83.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v32.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v82.l +; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.l, v23.h, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v25.h, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v26.h, v27.l ; GFX11-TRUE16-NEXT: s_clause 0x5 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:64 -; GFX11-TRUE16-NEXT: 
scratch_store_b128 v0, v[21:24], off offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[25:28], off offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[29:32], off offset:112 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v64i16_to_v128i8: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll index 64b5ecc..582f31b 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll @@ -4125,19 +4125,19 @@ define <4 x i32> @bitcast_v16i8_to_v4i32(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v7.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v15.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v16 @@ -4152,94 +4152,71 @@ define <4 x i32> @bitcast_v16i8_to_v4i32(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB26_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v0.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v1.h, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v8.h 
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v6.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v11, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v2.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v11, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.l, v4.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2 ; GFX11-TRUE16-NEXT: .LBB26_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v10.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v9.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v12.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v8.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v7.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 
0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v9, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v8.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v9, v3 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -8614,19 +8591,19 @@ define <4 x float> @bitcast_v16i8_to_v4f32(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v7.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l +; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v5.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v15.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v16 @@ -8641,94 +8618,71 @@ define <4 x float> @bitcast_v16i8_to_v4f32(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB50_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v0.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v1.h, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v6.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v11, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v2.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v11, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.l, v4.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: 
s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB50_2 ; GFX11-TRUE16-NEXT: .LBB50_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v10.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v9.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v12.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v8.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v7.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v9, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v8.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v9, v3 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -12703,19 +12657,19 @@ define <2 x i64> 
@bitcast_v16i8_to_v2i64(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v7.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v15.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v16 @@ -12730,94 +12684,71 @@ define <2 x i64> @bitcast_v16i8_to_v2i64(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB70_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v0.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v1.h, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v6.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.h -; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v11, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v2.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v11, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.l, v4.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB70_2 ; GFX11-TRUE16-NEXT: .LBB70_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v10.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v9.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v12.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v8.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v7.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v9, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v5 -; 
GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v8.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v9, v3 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -16408,19 +16339,19 @@ define <2 x double> @bitcast_v16i8_to_v2f64(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v7.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v15.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v16 @@ -16435,94 +16366,71 @@ define <2 x double> @bitcast_v16i8_to_v2f64(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB86_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v0.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 
0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v1.h, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v6.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v11, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v2.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v11, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.l, v4.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB86_2 ; GFX11-TRUE16-NEXT: .LBB86_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v10.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v9.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v12.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v8.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v7.h, 
v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v9, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v8.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v9, v3 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -19833,19 +19741,19 @@ define <8 x i16> @bitcast_v16i8_to_v8i16(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v7.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v15.l +; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v7.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v15.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v16 @@ -19860,94 +19768,71 @@ define <8 x i16> @bitcast_v16i8_to_v8i16(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB98_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v0.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v1.h, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v6.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v11, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v2.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v11, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.l, v4.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB98_2 ; GFX11-TRUE16-NEXT: .LBB98_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v10.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v9.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v12.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v8.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v7.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v9, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v8.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: 
v_add_nc_u16 v3.l, 0x300, v3.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v9, v3 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -22745,19 +22630,19 @@ define <8 x half> @bitcast_v16i8_to_v8f16(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v7.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v15.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v16 @@ -22772,94 +22657,71 @@ define <8 x half> @bitcast_v16i8_to_v8f16(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB106_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v0.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v1.h, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v6.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v11, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v2.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v11, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.l, v4.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB106_2 ; GFX11-TRUE16-NEXT: .LBB106_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v10.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v9.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v12.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v8.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v7.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v9, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v8.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v9, v3 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -24960,19 +24822,19 @@ define <8 x bfloat> @bitcast_v16i8_to_v8bf16(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v7.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v15.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v16 @@ -24987,94 +24849,71 @@ define <8 x bfloat> @bitcast_v16i8_to_v8bf16(<16 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB110_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, 
v0.l, v8.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v1.h, v6.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v6.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v11, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v2.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v11, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.l, v4.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v11.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB110_2 ; GFX11-TRUE16-NEXT: .LBB110_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v10.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v9.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v12.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 
0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v8.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v7.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v9, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v9, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v9.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v8.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v9.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v5.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v9, v3 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll index cb4b3bd..0a73571 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll @@ -6298,31 +6298,33 @@ define <8 x i32> @bitcast_v32i8_to_v8i32(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v19.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v17.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v13.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v12.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v8.l -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v0.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v3.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v23.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v21.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v23.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v29.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v31.l ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v32 ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_3 @@ -6335,48 +6337,43 @@ define <8 x i32> @bitcast_v32i8_to_v8i32(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB26_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v0.l, v19.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v14.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v1.h, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v2.l, v14.h -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v3.l, v12.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v13.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v4.l, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v10.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 @@ -6387,122 +6384,88 @@ define <8 x i32> @bitcast_v32i8_to_v8i32(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v5.l, 
v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v6.l, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v7.l, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2 ; GFX11-TRUE16-NEXT: .LBB26_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v21.l, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v20.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v17.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v16.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v19.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v18.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v14.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v13.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v19.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v18.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v17.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v15.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v14.h, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v15.l, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v4 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, 
v21.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v12.h, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v13.l, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v11.h, v3.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v10.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v9.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v10 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v8.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.h, v6.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v17.l, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v17.h, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v15.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v15.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v16.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v10.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v11.l, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v11.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v12.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v8.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v8.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v9.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.h, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v10.l, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, 
v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -13349,31 +13312,33 @@ define <8 x float> @bitcast_v32i8_to_v8f32(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v19.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v17.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v13.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v12.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v0.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v3.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v23.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v21.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v23.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v29.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v31.l ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v32 ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, 
s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB50_3 @@ -13386,48 +13351,43 @@ define <8 x float> @bitcast_v32i8_to_v8f32(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB50_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v0.l, v19.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v14.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v1.h, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v2.l, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v3.l, v12.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v13.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v4.l, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v10.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_hi16 -; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr12_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 @@ -13438,122 +13398,88 @@ define <8 x float> @bitcast_v32i8_to_v8f32(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v5.l, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v6.l, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v7.l, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB50_2 ; GFX11-TRUE16-NEXT: .LBB50_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v21.l, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v20.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v17.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v16.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v19.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v18.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v14.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v13.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3 +; 
GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v19.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v18.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v17.h, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v15.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v14.h, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v15.l, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v4 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v12.h, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v13.l, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v11.h, v3.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v10.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v9.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v10 -; GFX11-TRUE16-NEXT: v_add_nc_u16 
v21.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v8.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.h, v6.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v17.l, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v17.h, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v15.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v15.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v16.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v10.h, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v11.l, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v11.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v12.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v8.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v8.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v9.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.h, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v10.l, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -19888,31 +19814,33 @@ define <4 x i64> @bitcast_v32i8_to_v4i64(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v19.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v17.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v13.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v12.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v0.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v3.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v11.l -; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v23.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v21.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v25.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v24.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v23.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v22.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v21.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v29.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v31.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v31.l ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v32 ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB70_3 @@ -19925,48 +19853,43 @@ define <4 x i64> @bitcast_v32i8_to_v4i64(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB70_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v17.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v0.l, v19.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v18.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v14.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v1.h, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v2.l, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v21.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v3.l, v12.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v13.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v10.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v3 -; 
GFX11-TRUE16-NEXT: v_or_b16 v21.l, v4.l, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v10.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 @@ -19977,122 +19900,88 @@ define <4 x i64> @bitcast_v32i8_to_v4i64(<32 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v5.l, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v6.l, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, 
v6
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v7.l, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB70_2
; GFX11-TRUE16-NEXT: .LBB70_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v21.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v20.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v17.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v16.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v19.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v18.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v14.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v13.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v19.h, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v18.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v17.h, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v15.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v14.h, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v15.l, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v12.h, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v13.l, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v11.h, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v10.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v9.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v10
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v8.l, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.h, v6.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v17.l, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v17.h, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v15.l, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v16.h, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v10.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v11.l, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v11.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v12.l, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.h, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v8.l, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v8.h, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v9.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.h, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v10.l, v7.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -25929,31 +25818,33 @@ define <4 x double> @bitcast_v32i8_to_v4f64(<32 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v19.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v17.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v13.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v12.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v0.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v3.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v23.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v21.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v29.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v25.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v24.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v23.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v22.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v21.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v23.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v25.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v29.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v31.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v31.l
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v32
; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB86_3
@@ -25966,48 +25857,43 @@ define <4 x double> @bitcast_v32i8_to_v4f64(<32 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB86_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v21.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v20.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v17.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v0.l, v19.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v19.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v15.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v18.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v14.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v13.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v1.h, v17.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v18.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v15.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v2.l, v14.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v21.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v3.l, v12.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v13.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v10.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v4.l, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v19.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v15.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v16.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v8.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v9.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v9.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v10.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16
@@ -26018,122 +25904,88 @@ define <4 x double> @bitcast_v32i8_to_v4f64(<32 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v5.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v8.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v6.l, v9.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v21.l, v7.l, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB86_2
; GFX11-TRUE16-NEXT: .LBB86_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v21.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v20.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v17.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v16.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v19.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v18.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v14.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v13.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v14.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v19.h, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v18.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v17.h, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v15.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v13.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v12.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v21, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v14.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v14.h, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v15.l, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v21, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v21.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v12.h, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v13.l, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v21, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v11.h, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v21, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v10.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v21, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v9.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v21, v10
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v8.l, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.h, v6.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v21, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v21.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v19.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v17.l, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v17.h, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v15.l, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v15.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v16.h, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v10.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v11.l, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v11.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v12.l, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.h, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v8.l, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v8.h, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v9.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v9.h, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v10.l, v7.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v21, v7
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index 3aaf254..b622e6e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -3044,91 +3044,66 @@ define <40 x i8> @bitcast_v10i32_to_v40i8(<10 x i32> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, 0
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v1.l, v11.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v30.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v30.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v2.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v28.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v14.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v15, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v3.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v26.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v15, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v4.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v13.h
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v26.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v15, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v5.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v24.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v23.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v15, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v6.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v12.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v28.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v25.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v21.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v15, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v7.l, v11.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v20.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v13.l
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v21.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v15, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v8.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v19.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v11.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v15, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v11.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v9.h, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v15.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v18.l
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v17.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v15, v11
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v15.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v15, v9
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v18.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v13.l
; GFX11-TRUE16-NEXT: s_clause 0x2
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
-; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[11:12], off offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[9:10], off offset:32
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v10i32_to_v40i8:
@@ -5025,39 +5000,41 @@ define <10 x i32> @bitcast_v40i8_to_v10i32(<40 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v25.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v23.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v21.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v19.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v17.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v3.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v5.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v35.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v30.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v29.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v28.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v27.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v29.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v18.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v28.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v27.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v33.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v33.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v34.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v34.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v35.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v33.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v33.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v34.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v34.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(4)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v35.l
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v36
; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB14_3
@@ -5071,63 +5048,53 @@ define <10 x i32> @bitcast_v40i8_to_v10i32(<40 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB14_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v26.h
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v0.l, v24.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v25.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v27.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v19.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v19.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v27, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v1.h, v23.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v25.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v21.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v20.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v15.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v15.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v18.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v21.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v27, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v2.l, v20.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v27.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v17.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v15.h
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v3.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v14.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v27, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v4.l, v15.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v32.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v32.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v5.l, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v12.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v31.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v31.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v27, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v6.l, v13.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v27.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v21.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v22.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v23.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v23.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v24.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v16.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v12.l
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16
@@ -5140,147 +5107,110 @@ define <10 x i32> @bitcast_v40i8_to_v10i32(<40 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v10.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v27, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v7.l, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v27.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v27, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v8.l, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v27.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v27, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v9.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v27.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v27, v9
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB14_2
; GFX11-TRUE16-NEXT: .LBB14_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v26.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v25.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v22.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v21.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v25.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v21.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v20.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v19.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v15.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v15.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v31.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v31.l, 3
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v24.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v25.l, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v23.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v23.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v19.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v19.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v17.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v25, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v16.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v20.h, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v21.l, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v25, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v25.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v17.h, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v18.h, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v25, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v15.h, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v25, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v14.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v13.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v13.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v12.l, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v12.h, v6.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v25, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v31.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v31.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v11.l, v7.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v25, v12
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.l, v8.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.h, v8.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v25, v11
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v21.h, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v22.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v23.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v23.h, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v24.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v16.h, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v17.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v17.h, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v18.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v19.l, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.h, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v13.l, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v13.h, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v14.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v14.h, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v10.l, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v10.h, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v11.l, v8.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v11.h, v9.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v12.l, v9.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v25, v9
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -9991,91 +9921,66 @@ define <40 x i8> @bitcast_v10f32_to_v40i8(<10 x float> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, 0
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v1.l, v11.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v30.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v30.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v2.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v28.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v14.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v15, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v3.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v26.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v15, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v4.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v13.h
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v26.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v15, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v5.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v24.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v23.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v15, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v6.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v12.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v28.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v25.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v21.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v15, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v7.l, v11.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v20.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v13.l
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v21.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v15, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v8.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v15.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v19.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v11.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v15, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v11.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v9.h, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v15.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v18.l
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v17.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v15, v11
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v15.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v15, v9
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v18.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v13.l
; GFX11-TRUE16-NEXT: s_clause 0x2
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
-; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[11:12], off offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[9:10], off offset:32
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: bitcast_v10f32_to_v40i8:
@@ -11997,39 +11902,41 @@ define <10 x float> @bitcast_v40i8_to_v10f32(<40 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v25.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v23.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v21.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v19.l
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v17.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.h, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v3.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v5.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v35.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v30.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v29.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v28.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v27.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v29.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v18.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v28.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v27.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v33.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v33.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v34.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v34.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v35.l
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v33.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v33.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v34.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v34.h
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(4)
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v35.l
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v36
; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB34_3
@@ -12043,63 +11950,53 @@ define <10 x float> @bitcast_v40i8_to_v10f32(<40 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB34_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v26.h
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v21.h
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v0.l, v24.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v25.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v27.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v19.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v19.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v27, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v1.h, v23.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v25.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v21.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v20.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v15.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v15.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v18.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v21.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v27, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v2.l, v20.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v27.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v17.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v15.h
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v27, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v3.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v14.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v27, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v4.l, v15.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v32.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v32.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v27, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v5.l, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v27.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v12.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v31.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v31.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v27, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v6.l, v13.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v27.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v21.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v22.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v23.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v23.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v24.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v16.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v19.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v12.l
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16
@@ -12112,147 +12009,110 @@ define <10 x float> @bitcast_v40i8_to_v10f32(<40 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr15_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr13_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v10.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr14_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_hi16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v27, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v7.l, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v27.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v27, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v8.l, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v27.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v27, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v27.l, v9.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v27.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v27, v9
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr12_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB34_2
; GFX11-TRUE16-NEXT: .LBB34_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v26.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v25.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v22.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v21.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v25.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v21.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v20.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v19.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v15.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v15.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v31.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v31.l, 3
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v24.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v25.l, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v23.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v23.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v19.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v19.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v17.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v25, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v16.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v20.h, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v21.l, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v25, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v25.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v17.h, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v18.h, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v16.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v18.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v25, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v15.h, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v25,
v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v14.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v25, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v13.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v13.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v25, v8 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v12.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v12.h, v6.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v25, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v31.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v31.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v11.l, v7.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v11.h, v7.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v25, v12 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.l, v8.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.h, v8.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v25, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v25.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v21.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v22.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v23.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v23.h, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v24.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v16.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v17.l, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v17.h, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v18.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v19.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v13.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v13.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v14.l, 
v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v14.h, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v10.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v10.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v11.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v11.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v12.l, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v25, v9 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -16367,91 +16227,66 @@ define <40 x i8> @bitcast_v20i16_to_v40i8(<20 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v1.l, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v2.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v28.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v15, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v3.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v15, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v4.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 
v11.h, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v15, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v5.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v15, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v6.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v28.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v15, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v7.l, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v13.l ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v21.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v15, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v8.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v19.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v11.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v15, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v11.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v9.h, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v17.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v15, v11 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v15.h -; GFX11-TRUE16-NEXT: 
v_or_b16 v9.h, v10.l, v10.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v15, v9 +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v18.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v13.l ; GFX11-TRUE16-NEXT: s_clause 0x2 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[11:12], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[9:10], off offset:32 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v20i16_to_v40i8: @@ -22484,91 +22319,66 @@ define <40 x i8> @bitcast_v20f16_to_v40i8(<20 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v1.l, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v2.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v28.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v15, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v3.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v15, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v4.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; 
GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v15, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v5.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v15, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v6.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v28.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v15, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v7.l, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v13.l ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v21.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v15, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v8.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v19.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v11.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v15, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v11.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v9.h, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v17.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v15, v11 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v15.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v15, v9 +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v18.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 
v2.h, v2.h, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v13.l ; GFX11-TRUE16-NEXT: s_clause 0x2 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[11:12], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[9:10], off offset:32 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v20f16_to_v40i8: @@ -28791,39 +28601,38 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v27.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v25.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v23.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v21.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v18.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v16.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v3.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v5.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.l, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v17.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v19.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v21.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v48.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v38.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v48.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v39.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v38.h ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v36.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v37.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v38.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v36.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v36.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v38.l ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v49 ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB72_3 @@ -28837,65 +28646,55 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB72_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v35.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v29.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v0.l, v34.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v27.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v27.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v10, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v1.h, v33.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v34.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v28.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v23.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v22.h +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v10, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v2.l, v28.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v10.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v10, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v3.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v26.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v22.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v10, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v4.l, v23.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v32.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v32.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v10, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v5.l, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v17.h ; 
GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v31.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v31.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v10, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v6.l, v19.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v29.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v34.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_lo16 @@ -28906,146 +28705,110 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 -; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v16.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v10, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v7.l, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v10.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v10, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v8.l, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v10.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v10, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v9.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v10.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v10, v9 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB72_2 ; GFX11-TRUE16-NEXT: .LBB72_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v35.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v35.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v30.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v29.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v34.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v29.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v28.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v27.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v23.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v23.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v22.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v21.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3 +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v31.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v31.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v34.l, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v34.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v33.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v33.l, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v27.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v27.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v25.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v10, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v1.h -; 
GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v24.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v28.h, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v29.l, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v10, v4 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v25.h, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v26.h, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v21.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v20.h, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v10, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v23.h, v3.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v10, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.h, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v22.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v10, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v19.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v19.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v10, v8 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v18.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v18.h, v6.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v10, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v31.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v31.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v17.l, v7.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v17.h, v7.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 
0xff, v7.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v10, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v16.l, v8.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v16.h, v8.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v10, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v29.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v30.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v33.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v33.h, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v34.l, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v24.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v25.l, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v25.h, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v26.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v27.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v19.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v19.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v20.h, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v21.l, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v16.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v16.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v17.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v17.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v18.l, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v10, v9 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -30878,91 +30641,66 @@ define <40 x i8> @bitcast_v5f64_to_v40i8(<5 x double> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v1.l, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 
0xff, v2.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v2.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v28.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v15, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v3.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v15, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v4.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v15, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v5.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v15, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v6.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v28.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v15, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v7.l, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v13.l ; GFX11-TRUE16-NEXT: 
v_or_b16 v7.h, v7.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v21.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v15, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v8.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v19.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v11.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v15, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v11.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v9.h, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v17.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v15, v11 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v15.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v15, v9 +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v18.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v13.l ; GFX11-TRUE16-NEXT: s_clause 0x2 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[11:12], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[9:10], off offset:32 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v5f64_to_v40i8: @@ -32912,39 +32650,38 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v27.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v25.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.l, v23.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v21.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.h, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v18.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v16.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 
v28.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.l, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v3.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.l, 8, v5.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.l, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v17.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v19.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v21.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v48.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v38.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v48.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v39.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v38.h ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v36.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v37.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v38.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v36.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v36.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v38.l ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v49 ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB76_3 @@ -32958,65 +32695,55 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB76_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v35.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v35.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v30.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v29.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v0.l, v34.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v34.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v33.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v27.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v27.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v10, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v1.h, v33.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v20.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v34.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, 
v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v28.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v23.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v23.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v22.h +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v10, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v2.l, v28.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v10.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v10, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v3.l, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v26.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v22.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v10, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v4.l, v23.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v32.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v32.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v10, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v5.l, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v31.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v31.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v10, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v6.l, v19.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v29.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v33.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v33.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v34.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 -; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_lo16 @@ -33027,146 +32754,110 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v16.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v10, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v7.l, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v10.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v10, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v8.l, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v10.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v10, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v9.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v10.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v10, v9 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB76_2 ; GFX11-TRUE16-NEXT: .LBB76_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v35.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v35.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v30.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v29.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v34.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v29.l, 3 +; GFX11-TRUE16-NEXT: 
v_add_nc_u16 v2.l, v28.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v27.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v23.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v23.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v22.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v21.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v20.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v22.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3 +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v31.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v31.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v34.l, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v34.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v33.h, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v33.l, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v27.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v27.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v25.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v10, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v24.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v28.h, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v29.l, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v10, v4 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v25.h, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v26.h, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v21.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v20.h, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v10, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v20.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v22.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v23.h, v3.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v10, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.h, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v22.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 -; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v10, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v19.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v19.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v10, v8 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v18.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v18.h, v6.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v32.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v32.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v10, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v31.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v31.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v17.l, v7.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v17.h, v7.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v10, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v16.l, v8.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v16.h, v8.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v10, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v29.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v30.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v33.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v33.h, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v34.l, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v24.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v25.l, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v25.h, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v26.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v27.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v19.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v19.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v20.h, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v21.l, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v16.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v16.h, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v17.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v17.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v18.l, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; 
GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v10, v9 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -35022,91 +34713,66 @@ define <40 x i8> @bitcast_v5i64_to_v40i8(<5 x i64> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v1.l, v11.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v15, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v2.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v28.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v12.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v15, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v3.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v26.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v15, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v4.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 
0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v15, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v5.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v15.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v24.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v15, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v6.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v22.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v12.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v28.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v15, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v7.l, v11.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v13.l ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v21.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v15, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v8.l, v11.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v15.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v19.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v11.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v15, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v11.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v9.h, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v15.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v18.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v17.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v15, v11 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v9.l, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v15.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v15, v9 +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v18.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v14.h +; 
GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v13.l ; GFX11-TRUE16-NEXT: s_clause 0x2 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 -; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[11:12], off offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b64 v0, v[9:10], off offset:32 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: bitcast_v5i64_to_v40i8: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll index 632b03c..e6c7b1a 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll @@ -2279,17 +2279,13 @@ define i32 @bitcast_v4i8_to_i32(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB22_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v1.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2 ; GFX11-TRUE16-NEXT: .LBB22_4: ; %cmp.true @@ -2301,13 +2297,9 @@ define i32 @bitcast_v4i8_to_i32(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -4530,17 +4522,13 @@ define float @bitcast_v4i8_to_f32(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB42_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; 
GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v1.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB42_2 ; GFX11-TRUE16-NEXT: .LBB42_4: ; %cmp.true @@ -4552,13 +4540,9 @@ define float @bitcast_v4i8_to_f32(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -6487,17 +6471,13 @@ define <2 x i16> @bitcast_v4i8_to_v2i16(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB58_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v1.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB58_2 ; GFX11-TRUE16-NEXT: .LBB58_4: ; %cmp.true @@ -6509,13 +6489,9 @@ define <2 x i16> @bitcast_v4i8_to_v2i16(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | 
instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -8138,17 +8114,13 @@ define <2 x half> @bitcast_v4i8_to_v2f16(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB70_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v1.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB70_2 ; GFX11-TRUE16-NEXT: .LBB70_4: ; %cmp.true @@ -8160,13 +8132,9 @@ define <2 x half> @bitcast_v4i8_to_v2f16(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -9502,17 +9470,13 @@ define <2 x bfloat> @bitcast_v4i8_to_v2bf16(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB78_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v1.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, 
v2, v0 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB78_2 ; GFX11-TRUE16-NEXT: .LBB78_4: ; %cmp.true @@ -9524,13 +9488,9 @@ define <2 x bfloat> @bitcast_v4i8_to_v2bf16(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -10212,17 +10172,13 @@ define <1 x i32> @bitcast_v4i8_to_v1i32(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB82_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v1.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1_hi16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB82_2 ; GFX11-TRUE16-NEXT: .LBB82_4: ; %cmp.true @@ -10234,13 +10190,9 @@ define <1 x i32> @bitcast_v4i8_to_v1i32(<4 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l ; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll index d3fbba3..bff054f 100644 --- 
a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll @@ -8921,133 +8921,98 @@ define <64 x i8> @bitcast_v16i32_to_v64i8(<16 x i32> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v24.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v1.l, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v24.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v55.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v24, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v2.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v54.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v24, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v3.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v52.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v24, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v4.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v24, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v5.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v24, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v6.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, 
v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v24, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v7.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v17.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v39.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v24, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v8.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v24, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v9.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v24, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v10.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v34.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v53.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v50.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v35.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v24, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v11.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v32.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v31.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v24, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v12.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v31.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v21.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 
v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v34.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v24, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v13.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v19.l ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v24, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v14.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v28.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v24, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v15.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v18.l ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v26.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v24, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v16.l, v17.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v17.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v24, v16 +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v19.l ; GFX11-TRUE16-NEXT: s_clause 0x3 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 @@ -12574,53 +12539,52 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 offset:4 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v29.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v27.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v20.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v20.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v18.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v16.l -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.l, v4.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v2.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.l, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v17.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v19.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v21.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v27.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v80.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.h, 8, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v80.h ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v64.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v64.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v65.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v66.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v66.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v67.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v67.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v68.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v68.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v69.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v70.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v71.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v64.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v65.h +; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v66.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v66.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v67.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v67.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v68.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v68.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v69.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v70.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v70.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v71.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v80.l ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v81 ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB26_3 @@ -12633,98 +12597,82 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; GFX11-TRUE16-NEXT: .LBB26_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v55.h -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v53.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v52.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v52.l -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v0.l, v54.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v55.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v54.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v49.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v64, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v1.h, v53.l +; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v55.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v53.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v50.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v50.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v49.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v51.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v64, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v2.l, v51.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v64.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v48.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v39.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v48.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v64, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v3.l, v50.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v50.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v30.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v64, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v4.l, v39.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v27.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v38.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v38.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v5.l, v29.h -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v37.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v64, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v6.l, v27.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v64.h ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v36.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.h ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v35.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v64, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v7.l, v25.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v21.h ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v34.h ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v34.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v64, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v8.l, v23.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v20.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v33.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v64, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v9.l, v22.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v64.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v32.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v64, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v10.l, v21.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v54.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v51.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v52.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v53.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v39.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v48.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v48.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v18.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr55_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16 @@ -12745,226 +12693,170 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v64, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v11.l, v20.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v16.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 +; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v64, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v12.l, v19.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v64, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v13.l, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v64, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v14.l, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v64.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v64, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v15.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v64, v15 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2 ; GFX11-TRUE16-NEXT: .LBB26_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v55.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v53.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v52.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v52.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v55.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v53.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v50.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v50.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v49.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v29.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v27.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v23.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3 +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v37.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v37.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v35.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v35.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v33.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v33.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, v31.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v31.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: 
v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v55.l, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v54.l, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v53.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v49.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v48.h, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v52, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v39.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v51.l, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v51.h, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v52, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v52.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v50.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v50.h, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v52, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v26.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v24.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v39.h, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v52, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.h, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v30.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v27.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v27.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.l, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v25.h, v6.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v52, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v37.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v37.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v23.l, v7.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v23.h, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v52, v10
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v22.l, v8.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.h, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v52, v11
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v35.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v35.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v21.l, v9.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v21.h, v9.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v52, v12
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v20.l, v10.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v20.h, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v52, v13
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v33.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v33.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v19.l, v11.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v19.h, v11.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v52, v14
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v18.l, v12.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v18.h, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v52, v15
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v31.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v31.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.l, v13.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v17.h, v13.h
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v52, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v16.l, v14.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.h, v14.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v52, v17
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v54.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v51.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v51.h, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v52.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v52.h, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v53.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v30.h, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v39.l, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v39.h, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v48.l, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.h, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v24.h, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v25.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.h, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v26.h, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v27.l, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v21.l, v8.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v21.h, v9.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.l, v9.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v22.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v23.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v18.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v19.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v19.h, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v20.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v20.h, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v16.l, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v16.h, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v17.l, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.h, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v18.l, v15.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v10.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v52, v15
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -23576,133 +23468,98 @@ define <64 x i8> @bitcast_v16f32_to_v64i8(<16 x float> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v25.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v24.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v1.l, v17.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.l
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v24.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v55.l
; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v24, v1
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v2.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v54.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v54.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v17.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v24, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v3.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v53.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v52.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v52.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v51.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v24, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v4.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v51.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v22.l
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v24, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v5.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v50.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v49.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v22.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v24, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v6.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v24.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v48.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v23.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v24, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v7.l, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v17.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v39.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v38.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v24, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v8.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v20.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v24, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v9.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v36.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v35.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v24, v9
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v10.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v34.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l
+; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v53.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v20.h
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v50.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v35.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v24, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v11.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v24.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v33.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v32.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v31.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v24, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v12.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v31.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v18.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v21.h
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v49.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v20.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v38.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v37.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v34.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v24, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v13.l, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v30.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v19.l
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v30.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v29.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v24, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v14.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v18.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v28.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v17.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v24, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v15.l, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v26.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v21.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v20.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v20.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v19.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v18.l
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v17.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v26.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v24, v15
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v16.l, v17.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v17.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v24, v16
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v19.l
; GFX11-TRUE16-NEXT: s_clause 0x3
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
@@ -27358,53 +27215,52 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 offset:4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v29.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v27.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v20.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v18.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.l, v4.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.l, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v17.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v19.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v21.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v27.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v80.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.l, 8, v17.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v21.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.h, 8, v23.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v25.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v80.h
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v64.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v65.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v66.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v67.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v67.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v68.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v68.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v69.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v70.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v70.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v71.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v64.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v65.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v66.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v66.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v67.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v67.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v68.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v68.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v69.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v70.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v70.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v71.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v80.l
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v81
; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB50_3
@@ -27417,98 +27273,82 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB50_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v55.h
-; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v53.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v52.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v52.l
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v0.l, v54.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v55.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v54.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v49.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v64, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v1.h, v53.l
+; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v55.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v53.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v50.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v50.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v49.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v28.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v51.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v64, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v2.l, v51.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v64.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v48.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v39.l
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v48.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v64, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v3.l, v50.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v50.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v30.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v27.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v64, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v4.l, v39.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v27.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v38.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v38.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v5.l, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v23.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v37.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v37.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v64, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v6.l, v27.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v64.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v36.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v35.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v64, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v7.l, v25.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v21.h
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v35.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v34.h
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v34.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v64, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v8.l, v23.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v20.h
-; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v33.h
; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v33.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v64, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v9.l, v22.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v64.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v32.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v31.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v64, v9
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v10.l, v21.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h
; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v31.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v54.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v54.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v51.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v51.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v52.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v52.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v53.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v30.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v39.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v39.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v48.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v48.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v24.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v25.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v25.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v26.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v27.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v21.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v22.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v23.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v19.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v20.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v20.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v16.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v16.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v18.l
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16
@@ -27529,226 +27369,170 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v64, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v11.l, v20.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v16.h
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v64, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v12.l, v19.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v64.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v64, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v13.l, v18.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v64.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v64, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v14.l, v17.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v64.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v64, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v15.l, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v64.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v64, v15
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB50_2
; GFX11-TRUE16-NEXT: .LBB50_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v55.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v53.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v52.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v52.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v55.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v53.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v50.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v50.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v49.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v29.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v27.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v23.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v37.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v37.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v35.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v35.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v33.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v33.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, v31.h, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v31.l, 3
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v55.l, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v54.l, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v53.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v49.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v48.h, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v52, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v39.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v51.l, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v51.h, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v52, v4
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v52.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v50.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v50.h, v2.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v52, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v26.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v24.h, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v39.h, v3.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v52, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.h, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v30.h, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v7
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v27.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v27.h, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v8
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.l, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v25.h, v6.h
-; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v52, v9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v7.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v37.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v37.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v23.l, v7.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v23.h, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v52, v10
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v22.l, v8.l
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.h, v8.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v52, v11
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v35.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v35.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v21.l, v9.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v21.h, v9.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v52, v12
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v20.l, v10.l
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v20.h, v10.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v52, v13
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v33.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v33.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v19.l, v11.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v19.h, v11.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v52, v14
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v18.l, v12.l
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v18.h, v12.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v52, v15
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v13.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v31.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v31.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.l, v13.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v17.h, v13.h
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v52, v18
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v16.l, v14.l
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.h, v14.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v52, v17
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v54.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v51.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v51.h, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v52.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v52.h, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v53.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v30.h, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v39.l, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v39.h, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v48.l, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.h, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v24.h, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v25.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.h, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v26.h, v7.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v27.l, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v21.l, v8.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v21.h, v9.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.l, v9.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v22.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v23.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v18.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v19.l, v11.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v19.h, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v20.l, v12.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v20.h, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v16.l, v13.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v16.h, v14.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v17.l, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.h, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v18.l, v15.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v10.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v52, v15
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -37760,133 +37544,98 @@ define <64 x i8> @bitcast_v8i64_to_v64i8(<8 x i64> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v25.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v24.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v1.l, v17.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.l
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v24.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v55.l
; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v24, v1
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v2.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v54.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v54.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v17.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v24, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v3.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v53.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v52.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v52.l
; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v51.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v24, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v4.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v51.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v22.l
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v24, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v5.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v50.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v49.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v22.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v24, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v6.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v24.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v48.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v23.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
-; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v24, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v7.l, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v17.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v39.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v38.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v24, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v8.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v20.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
-; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v24, v8
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v9.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v36.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v35.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v24, v9
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v10.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v34.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l
+; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v53.l
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v20.h
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v50.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v19.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v35.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v24, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v11.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v24.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v33.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v32.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v31.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v24, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v12.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v31.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v18.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v21.h
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v49.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v20.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v38.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v37.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v34.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v19.h
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v24, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v13.l, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v30.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v19.l
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v30.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v29.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v24, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v14.l, v17.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v18.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v28.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v17.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v24, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v15.l, v17.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v26.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v21.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v20.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v20.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v19.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v18.l
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v17.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v24.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v27.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v26.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v24, v15
-; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v16.l, v17.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v17.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v24, v16
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v18.h
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v19.l
; GFX11-TRUE16-NEXT: s_clause 0x3
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
@@ -41418,53 +41167,52 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 offset:4
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v29.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v27.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v20.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v18.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v8.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v6.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.l, v4.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v7.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v11.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.l, 8, v13.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v15.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v17.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v19.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v21.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v23.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v25.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v27.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v80.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v7.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v13.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v15.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.l, 8, v17.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v19.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v21.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.h, 8, v23.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v25.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v27.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v80.h
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v64.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v65.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v66.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v67.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v67.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v68.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v68.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v69.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v70.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v70.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v71.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v64.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v65.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v66.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v66.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v67.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v67.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v68.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v68.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v69.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v70.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v70.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v71.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v80.l
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v81
; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB70_3
@@ -41477,98 +41225,82 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB70_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v55.h
-; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v53.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v52.h
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v52.l
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v0.l, v54.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v55.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v54.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v49.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v64, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v1.h, v53.l
+; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v55.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v53.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v50.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v50.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.h
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v49.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v28.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v51.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v64, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v2.l, v51.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v64.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v48.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v39.l
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v48.l
-; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v64, v2
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v3.l, v50.l
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v50.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v30.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v27.h
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v64, v3
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v4.l, v39.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v27.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v38.h
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v38.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v5.l, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v25.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v23.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v37.h
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v37.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v64, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v6.l, v27.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v64.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v36.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.h
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v35.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v64, v6
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v7.l, v25.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v64.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v21.h
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v35.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v34.h
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v34.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v64, v7
-; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v8.l, v23.l
-; GFX11-TRUE16-NEXT:
v_mov_b16_e32 v8.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v20.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v33.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v64, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v9.l, v22.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v64.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v32.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v64, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v10.l, v21.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v54.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v51.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v52.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v53.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v39.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v48.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v48.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v18.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16 @@ -41589,226 +41321,170 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> 
%a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v64, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v11.l, v20.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v16.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v64, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v12.l, v19.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v64, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v13.l, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v64, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v14.l, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v64.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: 
v_or_b32_e32 v14, v64, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v15.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v64, v15 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB70_2 ; GFX11-TRUE16-NEXT: .LBB70_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v55.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v53.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v52.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v52.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v55.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v53.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v50.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v50.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v49.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v29.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v27.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v23.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3 +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v37.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v37.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v35.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v35.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v33.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v33.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, v31.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v31.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v55.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v54.l, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v53.l, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v49.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v48.h, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v52, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v39.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v51.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v51.h, 
v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v52, v4 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v52.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v50.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v50.h, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v52, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v26.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v24.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v39.h, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.l, v3.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v52, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.h, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v30.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v27.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v27.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v8 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v25.h, v6.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v52, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v37.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v37.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v23.l, v7.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v23.h, v7.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v52, v10 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: 
v_mov_b16_e32 v11.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v22.l, v8.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.h, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v52, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v35.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v35.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v21.l, v9.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v21.h, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v52, v12 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v20.l, v10.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v20.h, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v52, v13 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v33.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v33.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v19.l, v11.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v19.h, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v52, v14 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v18.l, v12.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v18.h, v12.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v52, v15 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v31.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v31.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.l, v13.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v17.h, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v52, v18 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v16.l, v14.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.h, v14.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | 
instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v52, v17 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.l, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v54.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v51.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v51.h, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v52.l, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v52.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v53.l, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v30.h, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v39.l, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v39.h, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v48.l, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.h, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v24.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v25.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.h, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v26.h, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v27.l, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v21.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v21.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v22.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v23.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v18.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v19.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v19.h, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v20.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v20.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v16.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v16.h, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v17.l, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v18.l, v15.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v10.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h +; 
GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v52, v15 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -50954,133 +50630,98 @@ define <64 x i8> @bitcast_v8f64_to_v64i8(<8 x double> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v24.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v1.l, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v24.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v55.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v24, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v2.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v54.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v24, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v3.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v52.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v24, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v4.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v24, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v5.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v24, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v6.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, 
v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v24, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v7.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v17.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v39.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v24, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v8.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v24, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v9.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v24, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v10.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v34.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v53.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v50.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v35.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v24, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v11.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v32.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v31.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v24, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v12.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v31.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v21.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 
v21.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v34.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v24, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v13.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v19.l ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v24, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v14.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v28.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v24, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v15.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v18.l ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v26.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v24, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v16.l, v17.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v17.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v24, v16 +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v19.l ; GFX11-TRUE16-NEXT: s_clause 0x3 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 @@ -54638,53 +54279,52 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_load_d16_hi_b16 v38, off, s32 offset:4 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(11) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v29.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v27.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v22.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v20.l +; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v22.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v20.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v18.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.l, v4.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v2.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.l, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.l, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.l, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v55.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v7.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v9.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.l, 8, v13.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v50.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v17.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v19.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v21.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v27.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v80.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.l, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v54.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v51.h, 8, v7.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.l, 8, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v52.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v53.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v39.h, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.l, 8, v21.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v48.h, 8, v23.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v25.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.l, 8, v27.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v80.h ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v64.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v64.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v65.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v66.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v66.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v67.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v67.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v68.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v68.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v69.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v70.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v70.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v71.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v80.l +; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v18.l, 8, v64.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v65.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v65.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v66.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v66.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v67.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.l, 8, v67.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v68.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v68.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.l, 8, v69.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v69.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.l, 8, v70.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v70.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v71.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v71.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v80.l ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v81 ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB86_3 @@ -54697,98 +54337,82 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; GFX11-TRUE16-NEXT: .LBB86_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v55.h -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v53.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v52.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v52.l -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v0.l, v54.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v55.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.l, v54.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v49.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v64, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v1.h, v53.l +; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v55.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v53.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v50.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v50.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v49.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v49.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v28.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v51.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v26.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v64, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v2.l, v51.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v64.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v48.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v39.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v48.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v64, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v3.l, v50.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v50.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v30.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v27.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v64, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v4.l, v39.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v27.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v30.l ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: 
v_and_b16 v8.l, 0xff, v38.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v38.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v64, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v5.l, v29.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v25.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v23.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v37.h ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v37.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v64, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v6.l, v27.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v64.h ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v36.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.h ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v35.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v64, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v7.l, v25.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v21.h ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v34.h ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v34.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v64, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v8.l, v23.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v20.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v33.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v64, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v9.l, v22.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v64.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v32.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v31.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v64, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v10.l, v21.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v31.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v54.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v51.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v52.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v53.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v30.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v39.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v39.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v48.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v48.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v24.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v25.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v25.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v26.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v27.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v22.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v16.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.l +; 
GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v18.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr28_lo16 @@ -54809,226 +54433,170 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr31_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr24_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr26_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr22_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v64, v10 -; GFX11-TRUE16-NEXT: 
v_or_b16 v64.l, v11.l, v20.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v64.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v16.h +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr20_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v64, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v12.l, v19.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v64, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v13.l, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v64, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v14.l, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v64.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v64, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v64.l, v15.l, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v64.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_lo16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v64, v15 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr18_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB86_2 ; GFX11-TRUE16-NEXT: .LBB86_4: ; %cmp.true ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v55.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v53.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v52.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v52.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, 0 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v55.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v53.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v50.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v50.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, v49.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, v29.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, v27.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, v23.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, v28.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, v30.l, 3 +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, v37.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, v37.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, v35.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, v35.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, v33.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, v33.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 
v31.h, 3 +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, v31.l, 3 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v55.l, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v54.l, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v53.l, v1.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v49.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v49.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v48.h, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v52, v3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v39.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v51.l, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v51.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v52, v4 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v52.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v50.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v50.h, v2.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v29.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v28.h, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v52, v5 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v3.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v3.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, v26.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, v24.h, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v39.h, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.l, v3.h +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v52, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.h, v4.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v30.h, v4.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v24.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v26.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v52, v7 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v5.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, v28.l, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, v30.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v27.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v27.h, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v52, v8 -; GFX11-TRUE16-NEXT: 
v_add_nc_u16 v52.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v25.h, v6.h -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v38.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v38.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v52, v9 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v7.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v7.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, v37.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, v37.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v23.l, v7.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v23.h, v7.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v52, v10 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v22.l, v8.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.h, v8.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v36.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v36.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v52, v11 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v9.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v9.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, v35.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, v35.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v21.l, v9.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v21.h, v9.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v52, v12 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v20.l, v10.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v20.h, v10.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v34.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v34.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v52, v13 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v11.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v11.h -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, v33.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, v33.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v19.l, v11.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v19.h, v11.h +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v52, v14 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, 
v18.l, v12.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v18.h, v12.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v32.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v32.l, 3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v52, v15 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v13.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v18.h, 0x300, v13.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, v31.h, 3 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, v31.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.l, v13.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v17.h, v13.h +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v52, v18 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v17.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v16.l, v14.l -; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.h, v14.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v52, v17 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v52.l, 0x300, v15.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v54.l, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v54.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v51.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v51.h, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v52.l, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v52.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v53.l, v3.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v30.h, v3.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v39.l, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v39.h, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v48.l, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v48.h, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v24.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v25.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v25.h, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v26.h, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v27.l, v8.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v21.l, v8.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v21.h, v9.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v22.l, v9.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v22.h, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v23.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v18.h, v11.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v19.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v19.h, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v20.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v20.h, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v16.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v16.h, v14.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v17.l, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v17.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v18.l, v15.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v3.l +; 
GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v3.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v4.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x300, v4.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x300, v5.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v5.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.l, 0x300, v6.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v6.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v7.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x300, v7.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.l, 0x300, v8.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v8.h, 0x300, v8.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.l, 0x300, v9.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v9.h, 0x300, v9.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.l, 0x300, v10.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v10.h, 0x300, v10.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.l, 0x300, v11.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v11.h, 0x300, v11.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.l, 0x300, v12.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v12.h, 0x300, v12.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.l, 0x300, v13.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v13.h, 0x300, v13.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.l, 0x300, v14.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v14.h, 0x300, v14.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.l, 0x300, v15.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v15.h, 0x300, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v52.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v52, v15 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -64107,133 +63675,98 @@ define <64 x i8> @bitcast_v32i16_to_v64i8(<32 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v24.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v1.l, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v24.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v55.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v24, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v2.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v54.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v24, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v3.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v52.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v24, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v4.l, 
v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v24, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v5.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v24, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v6.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v24, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v7.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v17.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v39.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v24, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v8.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v24, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v9.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v24, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v10.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v34.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v53.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v50.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v35.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v24, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v11.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v32.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v31.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v24, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v12.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v31.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v21.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v34.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v24, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v13.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v19.l ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v24, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v14.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v28.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v24, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v15.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v18.l ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v17.l -; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v26.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v24, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v16.l, v17.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v17.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v24, v16 +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v19.l ; GFX11-TRUE16-NEXT: s_clause 0x3 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 @@ -76401,133 +75934,98 @@ define <64 x i8> @bitcast_v32f16_to_v64i8(<32 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v24.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v1.l, v17.h +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v17.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v24.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v55.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v3.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v24, v1 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v2.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v54.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v23.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v54.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v17.h ; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v4.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v24, v2 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v3.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v53.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v52.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v18.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v52.l ; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v51.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v3.l, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v24, v3 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v4.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v51.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v22.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v24, v4 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v5.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v50.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v22.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, 
v4.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.l, v18.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v24, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v6.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v48.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v3.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v23.l +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v24, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v7.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v39.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v17.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v39.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v38.l -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v24, v7 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v8.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v37.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v24, v8 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v9.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v36.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v35.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v24, v9 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v10.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v34.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v19.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v53.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v3.h, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v50.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v8.l, v19.h +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v35.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v24, v10 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v11.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v24.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v33.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v32.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, 
v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v31.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v24, v11 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v12.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v31.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v18.l +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v21.h +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v49.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v6.l, v20.h +; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v38.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.l, 8, v37.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.l, 8, v34.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v19.h ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v24, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v13.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v30.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v13.l, v19.l ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v30.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v24, v13 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v14.l, v17.h -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v18.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v28.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v24, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v15.l, v17.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v26.l +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v21.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v20.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v9.l, v21.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v11.l, v20.l +; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v14.l, v19.h +; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v17.h +; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.l, v18.l ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v17.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v24.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v27.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v26.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v24, v15 -; GFX11-TRUE16-NEXT: v_or_b16 v24.l, v16.l, v17.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v17.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v24.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v24, v16 +; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v16.l, v18.h +; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v16.h, v19.l ; GFX11-TRUE16-NEXT: 
s_clause 0x3 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16 @@ -85053,57 +84551,57 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr17_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr55_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr19_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr21_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr49_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 +; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr70_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -85111,29 +84609,29 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB108_2 ; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[7:8] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[5:6] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[29:30], 24, v[15:16] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[11:12] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[3:4] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[5:6] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[29:30], 24, v[15:16] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[9:10] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[3:4] ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 24, v14 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v14 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v13 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 24, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v9 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v8 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v3 @@ -85141,11 +84639,11 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v2 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v1 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[30:31], 24, v[13:14] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[1:2] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v1.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.h, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v2.l +; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v2.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v3.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.h, v3.h @@ -85155,26 +84653,26 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.h, v5.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v6.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.h, v6.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v7.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v8.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v9.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v9.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v10.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v10.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v11.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v11.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v71.h, v12.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v13.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v13.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v87.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.h, v14.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.h, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v16.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v117.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v15.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.h, v16.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v16.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5 @@ -85187,71 +84685,72 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB108_4 ; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true -; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v4 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v2 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_lshlrev_b32 v4, 16, v4 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v18 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_add_f32 v20, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v17 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 ; GFX11-TRUE16-NEXT: v_add3_u32 v18, v18, v17, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_add3_u32 v17, v24, v20, 0x7fff +; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; 
GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v52, v18, v23 :: v_dual_lshlrev_b32 v1, 16, v1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v39, v18, v23 :: v_dual_and_b32 v2, 0xffff0000, v2 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v2 :: v_dual_add_f32 v1, 0x40c00000, v1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v21, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v2 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_bfe_u32 v24, v20, 16, 1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v25, v1, 16, 1 -; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v2, 0x7fff +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v1 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_add3_u32 v17, v24, v20, 0x7fff +; GFX11-TRUE16-NEXT: v_add3_u32 v21, v21, v2, 0x7fff +; GFX11-TRUE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v4 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX11-TRUE16-NEXT: v_add3_u32 v23, v25, v1, 0x7fff ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v18, v21, v22, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v19 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v20 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v52.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v53, v23, v26, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v3 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v4 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v17, v17, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v4 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff -; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v20 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v20 :: v_dual_add_f32 v3, 0x40c00000, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v2 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v53.h ; GFX11-TRUE16-NEXT: v_bfe_u32 v4, v22, 16, 1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v18 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v54, v19, v21, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v22 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v22, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v18 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v22 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v6 ; GFX11-TRUE16-NEXT: 
v_cndmask_b32_e32 v20, v1, v20, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v54.h ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v17 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v55, v1, v19 :: v_dual_and_b32 v2, 0xffff0000, v6 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v55, v1, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v54.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v19, v4, v21, vcc_lo ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 @@ -85304,305 +84803,266 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v10 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v65.h ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v66, v4, v5 :: v_dual_lshlrev_b32 v5, 16, v10 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v66, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v7 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v4, 0x40c00000, v7 :: v_dual_lshlrev_b32 v5, 16, v10 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_add_f32 v5, 0x40c00000, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v21 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v24, v1, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v4, 16, 1 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v66.h ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v4, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[19:20] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[17:18] ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v23, v1, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v22 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v68, v3, v8 :: v_dual_and_b32 v3, 0xffff0000, v9 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v3, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v12 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v6 :: v_dual_lshlrev_b32 v5, 16, v12 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v9 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11 +; GFX11-TRUE16-NEXT: s_delay_alu 
instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v68, v1, v4 :: v_dual_add_f32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v67, v1, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v9 -; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v66.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v26, v2, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v68.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v67.h ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff -; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v23 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v2, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[19:20] +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v68.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v80, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v14 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v82.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v80, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v71, v1, v2, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v11 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 24, v26 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v7, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v14 -; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v98, 24, v26 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v80.h ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v26 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v28, v3, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v80.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v71.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v25 ; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v27, v2, v3 :: v_dual_add_f32 v2, 0x40c00000, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v4, v5, vcc_lo -; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 +; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v6, v8, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff -; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v6, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v98, v6, v7 :: v_dual_and_b32 v5, 0xffff0000, v16 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v87.h +; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v96.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v33 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v33 -; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 
v114, 8, v23
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v112, v4, v6 :: v_dual_add_f32 v1, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v15
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v112.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v32
-; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v98.h
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26]
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v113, v4, v6 :: v_dual_add_f32 v6, 0x40c00000, v8
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v115, v2, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v113.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[23:24]
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 24, v33
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v33
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v32
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v27
+; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v117, v7, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v4, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v103.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v2, v3, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v113.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.l, v115.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v117.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[29:30], 24, v[37:38]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v39
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v39
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[29:30], 24, v[38:39]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[30:31], 24, v[32:33]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 8, v37
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v38
; GFX11-TRUE16-NEXT: .LBB108_4: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v53.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v131.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v17.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v51.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, 0
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v50.l
; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v18.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v1.l, v1.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v129.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v1.h
+; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v55.h
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v39.h
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v52.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v130.l
-; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v19.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v50.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v31, v1
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v20.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v2.l, v2.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v19.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v49.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v54.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v119.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v2.h
; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.l, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v55.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v128.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v118.l
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v21.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v31, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v49.l
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v3.l, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v54.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v119.l
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v22.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v115.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v31, v3
+; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.l, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v5.l, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v6.l, v6.h
+; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v20.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v118.l
+; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v65.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v116.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v21.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v48.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v64.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v114.l
+; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v22.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v112.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v5.h, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.h, v7.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v7.h, v8.l
+; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v8.h, v9.l
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v67.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v103.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v23.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v4.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v5.l, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v65.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v117.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v48.l
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v24.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v31, v4
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v101.l
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v5.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v6.l, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v116.l
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v25.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v35.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v31, v5
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v26.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v6.l, v6.h
-; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v114.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v98.l
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v27.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v34.l
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v7.l, v7.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v36.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v66.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v24.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v101.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v80.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v100.l
+; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v7.l, v7.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.l, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_or_b16 v8.l, v9.l, v9.h
+; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v10.l, v10.h
+; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v11.l, v11.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v25.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.l, 8, v35.l
+; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v68.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.l, 8, v99.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v26.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.l, 8, v97.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v96.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v87.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v27.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.l, 8, v34.l
+; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v10.h, v11.l
+; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.h, v12.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.h, v13.l
+; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v13.h, v14.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v71.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v86.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v28.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v85.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v31, v7
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v8.l, v8.h
-; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v82.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v100.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v30.l
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v33.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v8
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v81.l
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v9.l, v9.h
-; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v67.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v99.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v37.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v29.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v31, v9
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v38.h
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v10.l, v10.h
-; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v97.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v96.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v69.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v10
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v11.l, v11.h
-; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v80.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v86.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v11
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v12.l, v12.h
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v113.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v84.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v32.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v30.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v98.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v83.l
+; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v12.h
; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v13.l, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v112.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v84.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v31, v12
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v13.l, v13.h
-; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v87.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v83.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v13
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v14.l, v14.h
-; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v113.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v71.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v14
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v15.l, v15.h
-; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v103.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v70.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v31, v15
-; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v16.l, v16.h
-; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v17.l, v17.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v31.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v31, v16
+; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v14.l, v14.h
+; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v15.l, v15.h
+; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v16.l, v16.h
+; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v33.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v82.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v117.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.l, 8, v81.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v38.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v29.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v115.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.l, 8, v70.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.h, 0xff, v39.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.l, 8, v69.l
+; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v15.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v15.h, v16.l
+; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.h, v17.l
+; GFX11-TRUE16-NEXT: v_or_b16 v16.l, v17.h, v18.l
+; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v18.h, v19.l
; GFX11-TRUE16-NEXT: s_clause 0x3
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index ecc715c..11f90b9 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -3067,9 +3067,9 @@ define i64 @bitcast_v8i8_to_i64(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v5.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v7.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -3085,52 +3085,47 @@ define i64 @bitcast_v8i8_to_i64(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB26_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v5.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v0.l, v2.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v5, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB26_2
; GFX11-TRUE16-NEXT: .LBB26_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v4.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v4.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v3.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v4, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v4, v2
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v2.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.h, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -6210,9 +6205,9 @@ define double @bitcast_v8i8_to_f64(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v5.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v7.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -6228,52 +6223,47 @@ define double @bitcast_v8i8_to_f64(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB50_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v5.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v0.l, v2.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v5, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB50_2
; GFX11-TRUE16-NEXT: .LBB50_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v4.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v4.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v3.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v4, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v4, v2
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v2.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.h, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -9050,9 +9040,9 @@ define <2 x i32> @bitcast_v8i8_to_v2i32(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v5.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v7.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -9068,52 +9058,47 @@ define <2 x i32> @bitcast_v8i8_to_v2i32(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB70_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v5.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v0.l, v2.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v5, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB70_2
; GFX11-TRUE16-NEXT: .LBB70_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v4.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v4.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v3.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v4, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v4, v2
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v2.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.h, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -11590,9 +11575,9 @@ define <2 x float> @bitcast_v8i8_to_v2f32(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v5.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v7.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -11608,52 +11593,47 @@ define <2 x float> @bitcast_v8i8_to_v2f32(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB86_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v5.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v0.l, v2.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v5, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB86_2
; GFX11-TRUE16-NEXT: .LBB86_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v4.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v4.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v3.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v4, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v4, v2
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v2.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.h, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -13809,9 +13789,9 @@ define <4 x i16> @bitcast_v8i8_to_v4i16(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v5.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v7.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -13827,52 +13807,47 @@ define <4 x i16> @bitcast_v8i8_to_v4i16(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB98_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v5.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v0.l, v2.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v5, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB98_2
; GFX11-TRUE16-NEXT: .LBB98_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v4.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v4.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v3.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v4, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v4, v2
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v2.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.h, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -15630,9 +15605,9 @@ define <4 x half> @bitcast_v8i8_to_v4f16(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v5.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v7.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -15648,52 +15623,47 @@ define <4 x half> @bitcast_v8i8_to_v4f16(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB106_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v5.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v0.l, v2.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v5, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB106_2
; GFX11-TRUE16-NEXT: .LBB106_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v4.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v4.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v3.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v4, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v4, v2
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v2.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.h, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -16934,9 +16904,9 @@ define <4 x bfloat> @bitcast_v8i8_to_v4bf16(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v5.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v5.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v7.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -16952,52 +16922,47 @@ define <4 x bfloat> @bitcast_v8i8_to_v4bf16(<8 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB110_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v5.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v4.l
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v0.l, v2.h
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.l, v3.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v4.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v5, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.h, v2.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v2.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB110_2
; GFX11-TRUE16-NEXT: .LBB110_4: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.l, 3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v4.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v4.l, 3
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.h, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v3.l, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v4.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.h, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v1.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v4, v3
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v4, v2
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v2.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v2.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v3.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.h, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
index 685e2fb..9a6ea1b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
@@ -1104,16 +1104,15 @@ define <3 x i32> @bitcast_v12i8_to_v3i32(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v9.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v8.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v8.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v11.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v12
@@ -1128,37 +1127,28 @@ define <3 x i32> @bitcast_v12i8_to_v3i32(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB6_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v7.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v0.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v1.l, v4.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v4.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v7, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v1.h, v4.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v7, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v2.l, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v7, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB6_2
; GFX11-TRUE16-NEXT: .LBB6_4: ; %cmp.true
@@ -1166,36 +1156,26 @@ define <3 x i32> @bitcast_v12i8_to_v3i32(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v7.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3
+; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
+; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v5.h, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v4.l, v1.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.h, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v7, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v4.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v5.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.l, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v3.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v4.l, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.h, v2.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v7, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v2.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v7, v2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -4254,16 +4234,15 @@ define <3 x float> @bitcast_v12i8_to_v3f32(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v7.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v9.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v8.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v9.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v8.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v9.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v11.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v12
@@ -4278,37 +4257,28 @@ define <3 x float> @bitcast_v12i8_to_v3f32(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB22_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v7.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.h
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v0.l, v5.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v1.l, v4.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v6.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v5.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v3.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v3.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v4.l
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v7, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v1.h, v4.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v3.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v7, v4
-; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v2.l, v3.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v7, v2
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB22_2
; GFX11-TRUE16-NEXT: .LBB22_4: ; %cmp.true
@@ -4316,36 +4286,26 @@ define <3 x float> @bitcast_v12i8_to_v3f32(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v7.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v6.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v6.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3
+; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
+; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v5.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v5.h, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v4.l, v1.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.h, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v7, v5
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v5.h, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v4.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v5.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v3.l, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v3.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v4.l, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v3.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.h, v2.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v7, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.l, 0x300, v2.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v7.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v7, v2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -6909,12 +6869,12 @@ define <6 x bfloat> @bitcast_v12i8_to_v6bf16(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v10.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v9.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v10.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v9.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v12
@@ -6929,37 +6889,28 @@ define <6 x bfloat> @bitcast_v12i8_to_v6bf16(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB36_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v0.l, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v1.l, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v3, v0
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v1.h, v5.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v4.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v3, v5
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v2.l, v4.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.h
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB36_2
; GFX11-TRUE16-NEXT: .LBB36_4: ; %cmp.true
@@ -6967,36 +6918,26 @@ define <6 x bfloat> @bitcast_v12i8_to_v6bf16(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v8.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.h, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v7.l, 3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3
+; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
+; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v6.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v6.h, v0.h
; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v5.l, v1.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v0.h
-; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v5.h, v1.h
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v3, v6
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v6.h, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v5.h, v0.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v6.l, v1.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.l, v2.l
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v4.h, v2.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v3, v1
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v2.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l
; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.h
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -8669,12 +8610,12 @@ define <6 x half> @bitcast_v12i8_to_v6f16(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v0.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v1.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v3.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v10.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v9.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v1.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v5.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v10.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v9.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l
; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v12
@@ -8689,37 +8630,28 @@ define <6 x half> @bitcast_v12i8_to_v6f16(<12 x i8> %a, i32 %b) {
; GFX11-TRUE16-NEXT: .LBB40_3: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.h
-; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v0.l, v6.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h
-; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v1.l, v5.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.h
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l
+; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v6.h
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v5.h
+; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v6.l
+; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l
+; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h
+; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l
+; GFX11-TRUE16-NEXT:
v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v3, v1 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v2.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -10079,12 +10001,12 @@ define <6 x i16> @bitcast_v12i8_to_v6i16(<12 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v3.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v10.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v9.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v10.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v9.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v11.l ; GFX11-TRUE16-NEXT: s_mov_b32 s0, exec_lo ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX11-TRUE16-NEXT: v_cmpx_ne_u32_e32 0, v12 @@ -10099,37 +10021,28 @@ define <6 x i16> @bitcast_v12i8_to_v6i16(<12 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: .LBB44_3: ; %cmp.false ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v0.l, v6.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v6.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v1.l, v5.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v7.h +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v6.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v0.h, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.l, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v2.l, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v5.l ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr9_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr10_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v1.h, v5.l -; GFX11-TRUE16-NEXT: 
v_mov_b16_e32 v5.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v4.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v3, v5 -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v2.l, v4.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.h -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr6_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_lo16 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v2 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5_lo16 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB44_2 ; GFX11-TRUE16-NEXT: .LBB44_4: ; %cmp.true @@ -10137,36 +10050,26 @@ define <6 x i16> @bitcast_v12i8_to_v6i16(<12 x i8> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v8.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, v7.h, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, v7.l, 3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0 -; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, v8.l, 3 ; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, v10.l, 3 +; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l +; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v6.l, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v6.h, v0.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v5.l, v1.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v0.l -; GFX11-TRUE16-NEXT: v_add_nc_u16 v6.h, 0x300, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v5.h, v1.h ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v3, v6 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v6.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v5.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v6.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v4.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v5.l, v2.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, 0x300, v0.l +; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, 0x300, v0.h +; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.l, 0x300, v1.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v1.h, 0x300, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v4.l, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v4.h, v2.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v3, v1 -; GFX11-TRUE16-NEXT: v_add_nc_u16 v3.l, 0x300, v2.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.l, 0x300, v2.l ; GFX11-TRUE16-NEXT: v_add_nc_u16 v2.h, 0x300, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; diff --git a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll index cbf6b66..7dbbeaa 100644 --- a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll +++ 
@@ -3632,13 +3632,9 @@ define amdgpu_cs void @amdgpu_cs_v32i1(<32 x i1> %arg0) {
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v2.l, v1.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.h, v0.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v2
 ; GFX11-TRUE16-NEXT: global_store_b32 v[0:1], v0, off
 ; GFX11-TRUE16-NEXT: s_endpgm
 ;
@@ -3813,16 +3809,12 @@ define amdgpu_cs void @amdgpu_cs_v32i1(<32 x i1> %arg0) {
 ; GFX1250-TRUE16-NEXT: v_bitop3_b16 v2.l, v16.l, v16.h, 15 bitop3:0xec
 ; GFX1250-TRUE16-NEXT: v_or_b16 v2.h, v2.h, v17.l
 ; GFX1250-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v0.h, 15 bitop3:0xec
-; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX1250-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v1.l
-; GFX1250-TRUE16-NEXT: v_mov_b16_e32 v1.l, 0
 ; GFX1250-TRUE16-NEXT: v_bitop3_b16 v1.h, v2.l, v2.h, 0xff bitop3:0xec
-; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1250-TRUE16-NEXT: v_bitop3_b16 v0.l, v0.l, v0.h, 0xff bitop3:0xec
-; GFX1250-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l
-; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-TRUE16-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX1250-TRUE16-NEXT: global_store_b32 v[0:1], v0, off
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-TRUE16-NEXT: v_bitop3_b16 v1.l, v0.l, v0.h, 0xff bitop3:0xec
+; GFX1250-TRUE16-NEXT: global_store_b32 v[0:1], v1, off
 ; GFX1250-TRUE16-NEXT: s_endpgm
 ;
 ; GFX1250-FAKE16-LABEL: amdgpu_cs_v32i1:
diff --git a/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll b/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
index 26f204f..14897b6 100644
--- a/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
+++ b/llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
@@ -1771,33 +1771,29 @@ define amdgpu_kernel void @load_v4i8_to_v4f32_2_uses(ptr addrspace(1) noalias %o
 ; GFX11-TRUE16: ; %bb.0:
 ; GFX11-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
 ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: global_load_b32 v4, v0, s[0:1]
+; GFX11-TRUE16-NEXT: global_load_b32 v5, v0, s[0:1]
 ; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v4.l, 9
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v4.h, 9
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff00, v4.l
-; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff00, v4.h
-; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte3_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.l, v5.l, 9
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v0.h, v5.h, 9
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff00, v5.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff00, v5.h
+; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte3_e32 v3, v5
 ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v0.h
-; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte2_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte2_e32 v2, v5
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v1.l, v0.l
-; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h
-; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte1_e32 v1, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v5.l, 0x900, v0.l
-; GFX11-TRUE16-NEXT: v_add_nc_u16 v7.h, 0x900, v0.h
-; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte0_e32 v0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v5, v7
+; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v1.h, v0.h
+; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte1_e32 v1, v5
+; GFX11-TRUE16-NEXT: v_cvt_f32_ubyte0_e32 v0, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.l, 0x900, v4.l
+; GFX11-TRUE16-NEXT: v_add_nc_u16 v4.h, 0x900, v4.h
 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX11-TRUE16-NEXT: s_clause 0x1
 ; GFX11-TRUE16-NEXT: global_store_b128 v6, v[0:3], s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.ll b/llvm/test/CodeGen/AMDGPU/fmed3.ll
index 9e15225..3145a27 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3.ll
@@ -10,7 +10,7 @@
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-TRUE16 %s
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s
 
-define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_nnan_input_fmed3_r_i_i_f32:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -120,7 +120,7 @@ define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f32(ptr addrspace(1) %o
   ret void
 }
 
-define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_fmed3_nnan_r_i_i_f32:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -231,7 +231,7 @@ define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_f32(ptr addrspace(1) %out, pt
   ret void
 }
 
-define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_commute0_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_commute0_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_fmed3_nnan_r_i_i_commute0_f32:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -342,7 +342,7 @@ define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_commute0_f32(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_commute1_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_commute1_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_fmed3_nnan_r_i_i_commute1_f32:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -453,7 +453,7 @@ define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_commute1_f32(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_constant_order_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_constant_order_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_fmed3_nnan_r_i_i_constant_order_f32:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -569,7 +569,7 @@ define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_constant_order_f32(ptr addrsp
   ret void
 }
 
-define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_multi_use_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_multi_use_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_fmed3_nnan_r_i_i_multi_use_f32:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -740,7 +740,7 @@ define amdgpu_kernel void @v_test_fmed3_nnan_r_i_i_multi_use_f32(ptr addrspace(1
   ret void
 }
 
-define amdgpu_kernel void @v_test_fmed3_r_i_i_f64(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_fmed3_r_i_i_f64(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_fmed3_r_i_i_f64:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -955,14 +955,14 @@ define amdgpu_kernel void @v_test_fmed3_r_i_i_no_nans_f32(ptr addrspace(1) %out,
   %outgep = getelementptr float, ptr addrspace(1) %out, i32 %tid
   %a = load float, ptr addrspace(1) %gep0
-  %max = call float @llvm.maxnum.f32(float %a, float 2.0)
-  %med = call float @llvm.minnum.f32(float %max, float 4.0)
+  %max = call nnan float @llvm.maxnum.f32(float %a, float 2.0)
+  %med = call nnan float @llvm.minnum.f32(float %max, float 4.0)
   store float %med, ptr addrspace(1) %outgep
   ret void
 }
 
-define amdgpu_kernel void @v_test_legacy_fmed3_r_i_i_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_legacy_fmed3_r_i_i_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_legacy_fmed3_r_i_i_f32:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -1297,10 +1297,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod0(ptr addrspa
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
   %a.fneg = fsub float -0.0, %a
-  %tmp0 = call float @llvm.minnum.f32(float %a.fneg, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %a.fneg, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a.fneg, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a.fneg, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -1487,10 +1487,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod1(ptr addrspa
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
   %b.fneg = fsub float -0.0, %b
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b.fneg)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b.fneg)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b.fneg)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b.fneg)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -1677,10 +1677,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod2(ptr addrspa
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
   %c.fneg = fsub float -0.0, %c
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.fneg)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c.fneg)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -1872,14 +1872,14 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod012(ptr addrs
   %c = load volatile float, ptr addrspace(1) %gep2
   %a.fneg = fsub float -0.0, %a
-  %b.fabs = call float @llvm.fabs.f32(float %b)
-  %c.fabs = call float @llvm.fabs.f32(float %c)
+  %b.fabs = call nnan float @llvm.fabs.f32(float %b)
+  %c.fabs = call nnan float @llvm.fabs.f32(float %c)
   %c.fabs.fneg = fsub float -0.0, %c.fabs
-  %tmp0 = call float @llvm.minnum.f32(float %a.fneg, float %b.fabs)
-  %tmp1 = call float @llvm.maxnum.f32(float %a.fneg, float %b.fabs)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.fabs.fneg)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a.fneg, float %b.fabs)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a.fneg, float %b.fabs)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c.fabs.fneg)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
@@ -2082,16 +2082,16 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_negabs012(ptr addrs
   %c.fabs = call float @llvm.fabs.f32(float %c)
   %c.fabs.fneg = fsub float -0.0, %c.fabs
-  %tmp0 = call float @llvm.minnum.f32(float %a.fabs.fneg, float %b.fabs.fneg)
-  %tmp1 = call float @llvm.maxnum.f32(float %a.fabs.fneg, float %b.fabs.fneg)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c.fabs.fneg)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a.fabs.fneg, float %b.fabs.fneg)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a.fabs.fneg, float %b.fabs.fneg)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c.fabs.fneg)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
 
-define amdgpu_kernel void @v_nnan_inputs_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_nnan_inputs_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_nnan_inputs_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -2266,7 +2266,7 @@ define amdgpu_kernel void @v_nnan_inputs_med3_f32_pat0(ptr addrspace(1) %out, pt
   ret void
 }
 
-define amdgpu_kernel void @v_nnan_input_calls_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_nnan_input_calls_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_nnan_input_calls_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -2418,7 +2418,7 @@ define amdgpu_kernel void @v_nnan_input_calls_med3_f32_pat0(ptr addrspace(1) %ou
   ret void
 }
 
-define amdgpu_kernel void @v_nnan_call_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_nnan_call_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_nnan_call_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -2570,7 +2570,7 @@ define amdgpu_kernel void @v_nnan_call_med3_f32_pat0(ptr addrspace(1) %out, ptr 
   ret void
 }
 
-define amdgpu_kernel void @v_fast_call_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_fast_call_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_fast_call_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -2878,10 +2878,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -3030,10 +3030,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat1(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -3220,10 +3220,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat1_srcmod0(ptr addrspa
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
   %a.fneg = fsub float -0.0, %a
-  %tmp0 = call float @llvm.maxnum.f32(float %a.fneg, float %b)
-  %tmp1 = call float @llvm.minnum.f32(float %a.fneg, float %b)
-  %tmp2 = call float @llvm.maxnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.minnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.maxnum.f32(float %a.fneg, float %b)
+  %tmp1 = call nnan float @llvm.minnum.f32(float %a.fneg, float %b)
+  %tmp2 = call nnan float @llvm.maxnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.minnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -3372,10 +3372,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat2(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -3524,10 +3524,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat3(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -3676,10 +3676,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat4(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -3828,10 +3828,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat5(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -3980,10 +3980,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat6(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -4132,10 +4132,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat7(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -4284,10 +4284,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat8(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -4436,10 +4436,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat9(ptr addrspace(1) %o
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -4588,10 +4588,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat10(ptr addrspace(1) %
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -4740,10 +4740,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat11(ptr addrspace(1) %
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -4892,10 +4892,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat12(ptr addrspace(1) %
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -5044,10 +5044,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat13(ptr addrspace(1) %
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -5196,10 +5196,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat14(ptr addrspace(1) %
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -5348,10 +5348,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat15(ptr addrspace(1) %
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.minnum.f32(float %b, float %a)
-  %tmp1 = call float @llvm.maxnum.f32(float %b, float %a)
-  %tmp2 = call float @llvm.minnum.f32(float %c, float %tmp1)
-  %med3 = call float @llvm.maxnum.f32(float %tmp2, float %tmp0)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %b, float %a)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %b, float %a)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %c, float %tmp1)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp2, float %tmp0)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -5503,10 +5503,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat16(ptr addrspace(1) %
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %tmp0 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp1 = call float @llvm.minnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.maxnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.minnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp1 = call nnan float @llvm.minnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.maxnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.minnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -5515,7 +5515,7 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat16(ptr addrspace(1) %
 ; Negative patterns
 ; ---------------------------------------------------------------------
 
-define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_test_safe_med3_f32_pat0_multi_use0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -5717,7 +5717,7 @@ define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use0(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use1(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use1(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_test_safe_med3_f32_pat0_multi_use1:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -5944,7 +5944,7 @@ define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use1(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use2(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use2(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_test_safe_med3_f32_pat0_multi_use2:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -6146,7 +6146,7 @@ define amdgpu_kernel void @v_test_safe_med3_f32_pat0_multi_use2(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_test_safe_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_test_safe_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_test_safe_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -6352,7 +6352,7 @@ define amdgpu_kernel void @v_test_safe_med3_f32_pat0(ptr addrspace(1) %out, ptr 
   ret void
 }
 
-define amdgpu_kernel void @v_nnan_inputs_missing0_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_nnan_inputs_missing0_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_nnan_inputs_missing0_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -6527,7 +6527,7 @@ define amdgpu_kernel void @v_nnan_inputs_missing0_med3_f32_pat0(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_nnan_inputs_missing1_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_nnan_inputs_missing1_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_nnan_inputs_missing1_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -6702,7 +6702,7 @@ define amdgpu_kernel void @v_nnan_inputs_missing1_med3_f32_pat0(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_nnan_inputs_missing2_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_nnan_inputs_missing2_med3_f32_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_nnan_inputs_missing2_med3_f32_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -6877,7 +6877,7 @@ define amdgpu_kernel void @v_nnan_inputs_missing2_med3_f32_pat0(ptr addrspace(1)
   ret void
 }
 
-define amdgpu_kernel void @v_test_nnan_on_call_med3_f32_pat0_srcmod0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_test_nnan_on_call_med3_f32_pat0_srcmod0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_test_nnan_on_call_med3_f32_pat0_srcmod0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -7270,10 +7270,10 @@ define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod0_mismatch(pt
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
   %a.fneg = fsub float -0.0, %a
-  %tmp0 = call float @llvm.minnum.f32(float %a.fneg, float %b)
-  %tmp1 = call float @llvm.maxnum.f32(float %a, float %b)
-  %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %c)
-  %med3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+  %tmp0 = call nnan float @llvm.minnum.f32(float %a.fneg, float %b)
+  %tmp1 = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %c)
+  %med3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
   store float %med3, ptr addrspace(1) %outgep
   ret void
 }
@@ -7428,13 +7428,13 @@ define amdgpu_kernel void @v_test_global_nnans_min_max_f32(ptr addrspace(1) %out
   %a = load volatile float, ptr addrspace(1) %gep0
   %b = load volatile float, ptr addrspace(1) %gep1
   %c = load volatile float, ptr addrspace(1) %gep2
-  %max = call float @llvm.maxnum.f32(float %a, float %b)
-  %minmax = call float @llvm.minnum.f32(float %max, float %c)
+  %max = call nnan float @llvm.maxnum.f32(float %a, float %b)
+  %minmax = call nnan float @llvm.minnum.f32(float %max, float %c)
   store float %minmax, ptr addrspace(1) %outgep
   ret void
 }
 
-define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: v_test_nnan_input_fmed3_r_i_i_f16:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -7597,7 +7597,7 @@ define amdgpu_kernel void @v_test_nnan_input_fmed3_r_i_i_f16(ptr addrspace(1) %o
   ret void
 }
 
-define amdgpu_kernel void @v_nnan_inputs_med3_f16_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #1 {
+define amdgpu_kernel void @v_nnan_inputs_med3_f16_pat0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
 ; SI-SDAG-LABEL: v_nnan_inputs_med3_f16_pat0:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
@@ -7865,7 +7865,7 @@ define amdgpu_kernel void @v_nnan_inputs_med3_f16_pat0(ptr addrspace(1) %out, pt
   ret void
 }
 
-define amdgpu_kernel void @two_non_inline_constant(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @two_non_inline_constant(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: two_non_inline_constant:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -7998,7 +7998,7 @@ define amdgpu_kernel void @two_non_inline_constant(ptr addrspace(1) %out, ptr ad
 }
 
 ; FIXME: Simple stores do not work as a multiple use because they are bitcasted to integer constants.
-define amdgpu_kernel void @one_non_inline_constant(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @one_non_inline_constant(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: one_non_inline_constant:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -8137,7 +8137,7 @@ define amdgpu_kernel void @one_non_inline_constant(ptr addrspace(1) %out, ptr ad
   ret void
 }
 
-define amdgpu_kernel void @two_non_inline_constant_multi_use(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
+define amdgpu_kernel void @two_non_inline_constant_multi_use(ptr addrspace(1) %out, ptr addrspace(1) %aptr) {
 ; SI-SDAG-LABEL: two_non_inline_constant_multi_use:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -8343,7 +8343,7 @@ define amdgpu_kernel void @two_non_inline_constant_multi_use(ptr addrspace(1) %o
   ret void
 }
 
-define float @v_test_fmed3_r_i_i_f32_minimumnum_maximumnum(float %a) #1 {
+define float @v_test_fmed3_r_i_i_f32_minimumnum_maximumnum(float %a) {
 ; SI-LABEL: v_test_fmed3_r_i_i_f32_minimumnum_maximumnum:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8384,7 +8384,7 @@ define float @v_test_fmed3_r_i_i_f32_minimumnum_maximumnum(float %a) #1 {
   ret float %med
 }
 
-define <2 x float> @v_test_fmed3_r_i_i_v2f32_minimumnum_maximumnum(<2 x float> %a) #1 {
+define <2 x float> @v_test_fmed3_r_i_i_v2f32_minimumnum_maximumnum(<2 x float> %a) {
 ; SI-SDAG-LABEL: v_test_fmed3_r_i_i_v2f32_minimumnum_maximumnum:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8452,7 +8452,7 @@ define <2 x float> @v_test_fmed3_r_i_i_v2f32_minimumnum_maximumnum(<2 x float> %
   ret <2 x float> %med
 }
 
-define { float, float } @v_test_fmed3_r_i_i_f32_minimumnum_maximumnum_multi_use(float %a) #1 {
+define { float, float } @v_test_fmed3_r_i_i_f32_minimumnum_maximumnum_multi_use(float %a) {
 ; SI-SDAG-LABEL: v_test_fmed3_r_i_i_f32_minimumnum_maximumnum_multi_use:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8525,7 +8525,7 @@ define { float, float } @v_test_fmed3_r_i_i_f32_minimumnum_maximumnum_multi_use(
   ret { float, float } %ins.1
 }
 
-define float @v_test_nnan_input_fmed3_r_i_i_f32_minimumnum_maximumnum(float %a) #1 {
+define float @v_test_nnan_input_fmed3_r_i_i_f32_minimumnum_maximumnum(float %a) {
 ; SI-LABEL: v_test_nnan_input_fmed3_r_i_i_f32_minimumnum_maximumnum:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8567,7 +8567,7 @@ define float @v_test_nnan_input_fmed3_r_i_i_f32_minimumnum_maximumnum(float %a) 
   ret float %med
 }
 
-define float @v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minimumnum(float %a) #1 {
+define float @v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minimumnum(float %a) {
 ; SI-LABEL: v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minimumnum:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8609,7 +8609,7 @@ define float @v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minimumnum(float %a) 
   ret float %med
 }
 
-define float @v_test_nnan_input_fmed3_r_i_i_f32_maxnum_minimumnum(float %a) #1 {
+define float @v_test_nnan_input_fmed3_r_i_i_f32_maxnum_minimumnum(float %a) {
 ; SI-LABEL: v_test_nnan_input_fmed3_r_i_i_f32_maxnum_minimumnum:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8651,7 +8651,7 @@ define float @v_test_nnan_input_fmed3_r_i_i_f32_maxnum_minimumnum(float %a) #1 {
   ret float %med
 }
 
-define float @v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minnum(float %a) #1 {
+define float @v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minnum(float %a) {
 ; SI-LABEL: v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minnum:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8693,7 +8693,7 @@ define float @v_test_nnan_input_fmed3_r_i_i_f32_maximumnum_minnum(float %a) #1 {
   ret float %med
 }
 
-define half @v_test_fmed3_r_i_i_f16_minimumnum_maximumnum(half %a) #1 {
+define half @v_test_fmed3_r_i_i_f16_minimumnum_maximumnum(half %a) {
 ; SI-SDAG-LABEL: v_test_fmed3_r_i_i_f16_minimumnum_maximumnum:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8772,7 +8772,7 @@ define half @v_test_fmed3_r_i_i_f16_minimumnum_maximumnum(half %a) #1 {
   ret half %med
 }
 
-define <2 x half> @v_test_fmed3_r_i_i_v2f16_minimumnum_maximumnum(<2 x half> %a) #1 {
+define <2 x half> @v_test_fmed3_r_i_i_v2f16_minimumnum_maximumnum(<2 x half> %a) {
 ; SI-SDAG-LABEL: v_test_fmed3_r_i_i_v2f16_minimumnum_maximumnum:
 ; SI-SDAG: ; %bb.0:
 ; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8848,7 +8848,7 @@ define <2 x half> @v_test_fmed3_r_i_i_v2f16_minimumnum_maximumnum(<2 x half> %a)
   ret <2 x half> %med
 }
 
-define double @v_test_fmed3_r_i_i_f64_minimumnum_maximumnum(double %a) #1 {
+define double @v_test_fmed3_r_i_i_f64_minimumnum_maximumnum(double %a) {
 ; SI-LABEL: v_test_fmed3_r_i_i_f64_minimumnum_maximumnum:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -8905,5 +8905,4 @@ declare half @llvm.minnum.f16(half, half) #0
 declare half @llvm.maxnum.f16(half, half) #0
 
 attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="false" }
 attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll
index 78a961e..415828f 100644
--- a/llvm/test/CodeGen/AMDGPU/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/frem.ll
@@ -4858,7 +4858,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v2
 ; SI-NEXT: s_cbranch_vccz .LBB9_2
-; SI-NEXT: ; %bb.1: ; %frem.else
+; SI-NEXT: ; %bb.1: ; %frem.else20
 ; SI-NEXT: v_bfi_b32 v7, s0, 0, v2
 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
 ; SI-NEXT: v_cmp_eq_f32_e32 vcc, v5, v6
@@ -4869,7 +4869,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: .LBB9_2:
 ; SI-NEXT: ; implicit-def: $vgpr4
 ; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB9_3: ; %frem.compute
+; SI-NEXT: .LBB9_3: ; %frem.compute19
 ; SI-NEXT: s_mov_b32 s3, 0x7f800000
 ; SI-NEXT: v_cmp_lt_f32_e64 vcc, |v5|, s3
 ; SI-NEXT: v_frexp_exp_i32_f32_e32 v4, v5
@@ -4905,10 +4905,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0
 ; SI-NEXT: s_cmp_lt_i32 s1, 12
 ; SI-NEXT: s_cbranch_scc1 .LBB9_7
-; SI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; SI-NEXT: ; %bb.4: ; %frem.loop_body27.preheader
 ; SI-NEXT: s_sub_i32 s1, s2, s3
 ; SI-NEXT: s_add_i32 s1, s1, 11
-; SI-NEXT: .LBB9_5: ; %frem.loop_body
+; SI-NEXT: .LBB9_5: ; %frem.loop_body27
 ; SI-NEXT: ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT: v_mov_b32_e32 v7, v5
 ; SI-NEXT: v_mul_f32_e32 v5, v7, v6
@@ -4923,7 +4923,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: s_cbranch_scc1 .LBB9_5
 ; SI-NEXT: ; %bb.6: ; %Flow55
 ; SI-NEXT: v_mov_b32_e32 v5, v7
-; SI-NEXT: .LBB9_7: ; %frem.loop_exit
+; SI-NEXT: .LBB9_7: ; %frem.loop_exit28
 ; SI-NEXT: s_add_i32 s1, s1, -10
 ; SI-NEXT: v_ldexp_f32_e64 v5, v5, s1
 ; SI-NEXT: v_mul_f32_e32 v6, v5, v6
@@ -4944,7 +4944,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: v_cvt_f32_f16_e64 v7, |v7|
 ; SI-NEXT: v_cmp_ngt_f32_e32 vcc, v6, v7
 ; SI-NEXT: s_cbranch_vccz .LBB9_10
-; SI-NEXT: ; %bb.9: ; %frem.else20
+; SI-NEXT: ; %bb.9: ; %frem.else
 ; SI-NEXT: s_brev_b32 s0, -2
 ; SI-NEXT: v_bfi_b32 v8, s0, 0, v0
 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
@@ -4956,7 +4956,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: .LBB9_10:
 ; SI-NEXT: ; implicit-def: $vgpr5
 ; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB9_11: ; %frem.compute19
+; SI-NEXT: .LBB9_11: ; %frem.compute
 ; SI-NEXT: s_mov_b32 s3, 0x7f800000
 ; SI-NEXT: v_cmp_lt_f32_e64 vcc, |v6|, s3
 ; SI-NEXT: v_frexp_exp_i32_f32_e32 v5, v6
@@ -4992,10 +4992,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0
 ; SI-NEXT: s_cmp_lt_i32 s1, 12
 ; SI-NEXT: s_cbranch_scc1 .LBB9_15
-; SI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; SI-NEXT: ; %bb.12: ; %frem.loop_body.preheader
 ; SI-NEXT: s_sub_i32 s1, s2, s3
 ; SI-NEXT: s_add_i32 s1, s1, 11
-; SI-NEXT: .LBB9_13: ; %frem.loop_body27
+; SI-NEXT: .LBB9_13: ; %frem.loop_body
 ; SI-NEXT: ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT: v_mov_b32_e32 v8, v6
 ; SI-NEXT: v_mul_f32_e32 v6, v8, v7
@@ -5010,7 +5010,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; SI-NEXT: s_cbranch_scc1 .LBB9_13
 ; SI-NEXT: ; %bb.14: ; %Flow
 ; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: .LBB9_15: ; %frem.loop_exit28
+; SI-NEXT: .LBB9_15: ; %frem.loop_exit
 ; SI-NEXT: s_add_i32 s1, s1, -10
 ; SI-NEXT: v_ldexp_f32_e64 v6, v6, s1
 ; SI-NEXT: v_mul_f32_e32 v7, v6, v7
@@ -5084,7 +5084,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: v_and_b32_e32 v5, 0x7fffffff, v3
 ; CI-NEXT: s_and_b64 vcc, exec, s[2:3]
 ; CI-NEXT: s_cbranch_vccz .LBB9_2
-; CI-NEXT: ; %bb.1: ; %frem.else
+; CI-NEXT: ; %bb.1: ; %frem.else20
 ; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
 ; CI-NEXT: v_bfi_b32 v7, s0, 0, v2
 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v6, v5
@@ -5093,7 +5093,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: s_branch .LBB9_8
 ; CI-NEXT: .LBB9_2:
 ; CI-NEXT: ; implicit-def: $vgpr4
-; CI-NEXT: .LBB9_3: ; %frem.compute
+; CI-NEXT: .LBB9_3: ; %frem.compute19
 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v9, v6
 ; CI-NEXT: v_frexp_mant_f32_e32 v4, v6
 ; CI-NEXT: v_frexp_mant_f32_e32 v6, v5
@@ -5118,10 +5118,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v6
 ; CI-NEXT: v_div_fixup_f32 v8, v8, v5, 1.0
 ; CI-NEXT: s_cbranch_vccnz .LBB9_7
-; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; CI-NEXT: ; %bb.4: ; %frem.loop_body27.preheader
 ; CI-NEXT: v_sub_i32_e32 v6, vcc, v9, v10
 ; CI-NEXT: v_add_i32_e32 v6, vcc, 11, v6
-; CI-NEXT: .LBB9_5: ; %frem.loop_body
+; CI-NEXT: .LBB9_5: ; %frem.loop_body27
 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1
 ; CI-NEXT: v_mov_b32_e32 v9, v7
 ; CI-NEXT: v_mul_f32_e32 v7, v9, v8
@@ -5136,7 +5136,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: s_cbranch_vccnz .LBB9_5
 ; CI-NEXT: ; %bb.6: ; %Flow55
 ; CI-NEXT: v_mov_b32_e32 v7, v9
-; CI-NEXT: .LBB9_7: ; %frem.loop_exit
+; CI-NEXT: .LBB9_7: ; %frem.loop_exit28
 ; CI-NEXT: v_add_i32_e32 v6, vcc, -10, v6
 ; CI-NEXT: v_ldexp_f32_e32 v6, v7, v6
 ; CI-NEXT: v_mul_f32_e32 v7, v6, v8
@@ -5157,7 +5157,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: v_cvt_f32_f16_e64 v6, |v6|
 ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v7, v6
 ; CI-NEXT: s_cbranch_vccz .LBB9_10
-; CI-NEXT: ; %bb.9: ; %frem.else20
+; CI-NEXT: ; %bb.9: ; %frem.else
 ; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
 ; CI-NEXT: s_brev_b32 s0, -2
 ; CI-NEXT: v_bfi_b32 v8, s0, 0, v0
@@ -5167,7 +5167,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: s_branch .LBB9_16
 ; CI-NEXT: .LBB9_10:
 ; CI-NEXT: ; implicit-def: $vgpr5
-; CI-NEXT: .LBB9_11: ; %frem.compute19
+; CI-NEXT: .LBB9_11: ; %frem.compute
 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v10, v7
 ; CI-NEXT: v_frexp_mant_f32_e32 v5, v7
 ; CI-NEXT: v_frexp_mant_f32_e32 v7, v6
@@ -5192,10 +5192,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v7
 ; CI-NEXT: v_div_fixup_f32 v9, v9, v6, 1.0
 ; CI-NEXT: s_cbranch_vccnz .LBB9_15
-; CI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; CI-NEXT: ; %bb.12: ; %frem.loop_body.preheader
 ; CI-NEXT: v_sub_i32_e32 v7, vcc, v10, v11
 ; CI-NEXT: v_add_i32_e32 v7, vcc, 11, v7
-; CI-NEXT: .LBB9_13: ; %frem.loop_body27
+; CI-NEXT: .LBB9_13: ; %frem.loop_body
 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1
 ; CI-NEXT: v_mov_b32_e32 v10, v8
 ; CI-NEXT: v_mul_f32_e32 v8, v10, v9
@@ -5210,7 +5210,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; CI-NEXT: s_cbranch_vccnz .LBB9_13
 ; CI-NEXT: ; %bb.14: ; %Flow
 ; CI-NEXT: v_mov_b32_e32 v8, v10
-; CI-NEXT: .LBB9_15: ; %frem.loop_exit28
+; CI-NEXT: .LBB9_15: ; %frem.loop_exit
 ; CI-NEXT: v_add_i32_e32 v7, vcc, -10, v7
 ; CI-NEXT: v_ldexp_f32_e32 v7, v8, v7
 ; CI-NEXT: v_mul_f32_e32 v8, v7, v9
@@ -5275,7 +5275,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; VI-NEXT: v_cvt_f32_f16_e64 v3, |v1|
 ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v4, v3
 ; VI-NEXT: s_cbranch_vccz .LBB9_2
-; VI-NEXT: ; %bb.1: ; %frem.else
+; VI-NEXT: ; %bb.1: ; %frem.else20
 ; VI-NEXT: s_movk_i32 s2, 0x7fff
 ; VI-NEXT: v_bfi_b32 v2, s2, 0, v0
 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v4, v3
@@ -5284,7 +5284,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; VI-NEXT: s_branch .LBB9_8
 ; VI-NEXT: .LBB9_2:
 ; VI-NEXT: ; implicit-def: $vgpr2
-; VI-NEXT: .LBB9_3: ; %frem.compute
+; VI-NEXT: .LBB9_3: ; %frem.compute19
 ; VI-NEXT: v_frexp_exp_i32_f32_e32 v7, v4
 ; VI-NEXT: v_frexp_mant_f32_e32 v2, v4
 ; VI-NEXT: v_frexp_mant_f32_e32 v4, v3
@@ -5309,10 +5309,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; VI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v4
 ; VI-NEXT: v_div_fixup_f32 v6, v6, v3, 1.0
 ; VI-NEXT: s_cbranch_vccnz .LBB9_7
-; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; VI-NEXT: ; %bb.4: ; %frem.loop_body27.preheader
 ; VI-NEXT: v_sub_u32_e32 v4, vcc, v7, v8
 ; VI-NEXT: v_add_u32_e32 v4, vcc, 11, v4
-; VI-NEXT: .LBB9_5: ; %frem.loop_body
+; VI-NEXT: .LBB9_5: ; %frem.loop_body27
 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1
 ; VI-NEXT: v_mov_b32_e32 v7, v5
 ; VI-NEXT: v_mul_f32_e32 v5, v7, v6
@@ -5327,7 +5327,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; VI-NEXT: s_cbranch_vccnz .LBB9_5
 ; VI-NEXT: ; %bb.6: ; %Flow55
 ; VI-NEXT: v_mov_b32_e32 v5, v7
-; VI-NEXT: .LBB9_7: ; %frem.loop_exit
+; VI-NEXT: .LBB9_7: ; %frem.loop_exit28
 ; VI-NEXT: v_add_u32_e32 v4, vcc, -10, v4
 ; VI-NEXT:
v_ldexp_f32 v4, v5, v4 ; VI-NEXT: v_mul_f32_e32 v5, v4, v6 @@ -5347,7 +5347,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cvt_f32_f16_e64 v6, |v4| ; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v7, v6 ; VI-NEXT: s_cbranch_vccz .LBB9_10 -; VI-NEXT: ; %bb.9: ; %frem.else20 +; VI-NEXT: ; %bb.9: ; %frem.else ; VI-NEXT: s_movk_i32 s2, 0x7fff ; VI-NEXT: v_bfi_b32 v5, s2, 0, v3 ; VI-NEXT: v_cmp_eq_f32_e32 vcc, v7, v6 @@ -5356,7 +5356,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB9_16 ; VI-NEXT: .LBB9_10: ; VI-NEXT: ; implicit-def: $vgpr5 -; VI-NEXT: .LBB9_11: ; %frem.compute19 +; VI-NEXT: .LBB9_11: ; %frem.compute ; VI-NEXT: v_frexp_exp_i32_f32_e32 v10, v7 ; VI-NEXT: v_frexp_mant_f32_e32 v5, v7 ; VI-NEXT: v_frexp_mant_f32_e32 v7, v6 @@ -5381,10 +5381,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v7 ; VI-NEXT: v_div_fixup_f32 v9, v9, v6, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB9_15 -; VI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; VI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; VI-NEXT: v_sub_u32_e32 v7, vcc, v10, v11 ; VI-NEXT: v_add_u32_e32 v7, vcc, 11, v7 -; VI-NEXT: .LBB9_13: ; %frem.loop_body27 +; VI-NEXT: .LBB9_13: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v10, v8 ; VI-NEXT: v_mul_f32_e32 v8, v10, v9 @@ -5399,7 +5399,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_cbranch_vccnz .LBB9_13 ; VI-NEXT: ; %bb.14: ; %Flow ; VI-NEXT: v_mov_b32_e32 v8, v10 -; VI-NEXT: .LBB9_15: ; %frem.loop_exit28 +; VI-NEXT: .LBB9_15: ; %frem.loop_exit ; VI-NEXT: v_add_u32_e32 v7, vcc, -10, v7 ; VI-NEXT: v_ldexp_f32 v7, v8, v7 ; VI-NEXT: v_mul_f32_e32 v8, v7, v9 @@ -5443,7 +5443,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cvt_f32_f16_e64 v3, |v0| ; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v4, v3 ; GFX9-NEXT: s_cbranch_vccz .LBB9_2 -; GFX9-NEXT: ; %bb.1: ; %frem.else +; GFX9-NEXT: ; %bb.1: ; %frem.else20 ; GFX9-NEXT: s_movk_i32 s2, 0x7fff ; GFX9-NEXT: v_bfi_b32 v2, s2, 0, v1 ; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, v4, v3 @@ -5452,7 +5452,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB9_8 ; GFX9-NEXT: .LBB9_2: ; GFX9-NEXT: ; implicit-def: $vgpr2 -; GFX9-NEXT: .LBB9_3: ; %frem.compute +; GFX9-NEXT: .LBB9_3: ; %frem.compute19 ; GFX9-NEXT: v_frexp_exp_i32_f32_e32 v7, v4 ; GFX9-NEXT: v_frexp_mant_f32_e32 v2, v4 ; GFX9-NEXT: v_frexp_mant_f32_e32 v4, v3 @@ -5477,10 +5477,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 12, v4 ; GFX9-NEXT: v_div_fixup_f32 v6, v6, v3, 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX9-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX9-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX9-NEXT: v_sub_u32_e32 v4, v7, v8 ; GFX9-NEXT: v_add_u32_e32 v4, 11, v4 -; GFX9-NEXT: .LBB9_5: ; %frem.loop_body +; GFX9-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v7, v5 ; GFX9-NEXT: v_mul_f32_e32 v5, v7, v6 @@ -5495,7 +5495,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_cbranch_vccnz .LBB9_5 ; GFX9-NEXT: ; %bb.6: ; %Flow55 ; GFX9-NEXT: v_mov_b32_e32 v5, v7 -; GFX9-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX9-NEXT: .LBB9_7: ; %frem.loop_exit28 
; GFX9-NEXT: v_add_u32_e32 v4, -10, v4 ; GFX9-NEXT: v_ldexp_f32 v4, v5, v4 ; GFX9-NEXT: v_mul_f32_e32 v5, v4, v6 @@ -5514,7 +5514,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cvt_f32_f16_sdwa v5, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 ; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v6, v5 ; GFX9-NEXT: s_cbranch_vccz .LBB9_10 -; GFX9-NEXT: ; %bb.9: ; %frem.else20 +; GFX9-NEXT: ; %bb.9: ; %frem.else ; GFX9-NEXT: s_movk_i32 s2, 0x7fff ; GFX9-NEXT: v_bfi_b32 v4, s2, 0, v3 ; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, v6, v5 @@ -5523,7 +5523,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB9_16 ; GFX9-NEXT: .LBB9_10: ; GFX9-NEXT: ; implicit-def: $vgpr4 -; GFX9-NEXT: .LBB9_11: ; %frem.compute19 +; GFX9-NEXT: .LBB9_11: ; %frem.compute ; GFX9-NEXT: v_frexp_exp_i32_f32_e32 v9, v6 ; GFX9-NEXT: v_frexp_mant_f32_e32 v4, v6 ; GFX9-NEXT: v_frexp_mant_f32_e32 v6, v5 @@ -5548,10 +5548,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 12, v6 ; GFX9-NEXT: v_div_fixup_f32 v8, v8, v5, 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX9-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX9-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX9-NEXT: v_sub_u32_e32 v6, v9, v10 ; GFX9-NEXT: v_add_u32_e32 v6, 11, v6 -; GFX9-NEXT: .LBB9_13: ; %frem.loop_body27 +; GFX9-NEXT: .LBB9_13: ; %frem.loop_body ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v9, v7 ; GFX9-NEXT: v_mul_f32_e32 v7, v9, v8 @@ -5566,7 +5566,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_cbranch_vccnz .LBB9_13 ; GFX9-NEXT: ; %bb.14: ; %Flow ; GFX9-NEXT: v_mov_b32_e32 v7, v9 -; GFX9-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX9-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX9-NEXT: v_add_u32_e32 v6, -10, v6 ; GFX9-NEXT: v_ldexp_f32 v6, v7, v6 ; GFX9-NEXT: v_mul_f32_e32 v7, v6, v8 @@ -5612,7 +5612,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cvt_f32_f16_e64 v3, |v0| ; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v4, v3 ; GFX10-NEXT: s_cbranch_vccz .LBB9_2 -; GFX10-NEXT: ; %bb.1: ; %frem.else +; GFX10-NEXT: ; %bb.1: ; %frem.else20 ; GFX10-NEXT: v_bfi_b32 v2, 0x7fff, 0, v1 ; GFX10-NEXT: v_cmp_eq_f32_e32 vcc_lo, v4, v3 ; GFX10-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc_lo @@ -5620,7 +5620,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB9_8 ; GFX10-NEXT: .LBB9_2: ; GFX10-NEXT: ; implicit-def: $vgpr2 -; GFX10-NEXT: .LBB9_3: ; %frem.compute +; GFX10-NEXT: .LBB9_3: ; %frem.compute19 ; GFX10-NEXT: v_frexp_mant_f32_e32 v2, v4 ; GFX10-NEXT: v_frexp_mant_f32_e32 v6, v3 ; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v5, v4 @@ -5647,10 +5647,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v6 ; GFX10-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX10-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX10-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 11 -; GFX10-NEXT: .LBB9_5: ; %frem.loop_body +; GFX10-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v7, v4 ; GFX10-NEXT: s_add_i32 s2, s2, -11 @@ -5666,7 +5666,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, 
ptr addrspace(1) %i ; GFX10-NEXT: ; %bb.6: ; %Flow55 ; GFX10-NEXT: v_mov_b32_e32 v6, s2 ; GFX10-NEXT: v_mov_b32_e32 v4, v7 -; GFX10-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX10-NEXT: .LBB9_7: ; %frem.loop_exit28 ; GFX10-NEXT: v_add_nc_u32_e32 v6, -10, v6 ; GFX10-NEXT: v_ldexp_f32 v4, v4, v6 ; GFX10-NEXT: v_mul_f32_e32 v5, v4, v5 @@ -5684,7 +5684,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cvt_f32_f16_e64 v6, |v3| ; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v6, v4 ; GFX10-NEXT: s_cbranch_vccz .LBB9_10 -; GFX10-NEXT: ; %bb.9: ; %frem.else20 +; GFX10-NEXT: ; %bb.9: ; %frem.else ; GFX10-NEXT: v_bfi_b32 v5, 0x7fff, 0, v3 ; GFX10-NEXT: v_cmp_eq_f32_e32 vcc_lo, v6, v4 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v3, v5, vcc_lo @@ -5692,7 +5692,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB9_16 ; GFX10-NEXT: .LBB9_10: ; GFX10-NEXT: ; implicit-def: $vgpr5 -; GFX10-NEXT: .LBB9_11: ; %frem.compute19 +; GFX10-NEXT: .LBB9_11: ; %frem.compute ; GFX10-NEXT: v_frexp_mant_f32_e32 v5, v6 ; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v7, v6 ; GFX10-NEXT: v_ldexp_f32 v6, v5, 11 @@ -5719,10 +5719,10 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v8 ; GFX10-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX10-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX10-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 11 -; GFX10-NEXT: .LBB9_13: ; %frem.loop_body27 +; GFX10-NEXT: .LBB9_13: ; %frem.loop_body ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v9, v6 ; GFX10-NEXT: s_add_i32 s2, s2, -11 @@ -5738,7 +5738,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: ; %bb.14: ; %Flow ; GFX10-NEXT: v_mov_b32_e32 v8, s2 ; GFX10-NEXT: v_mov_b32_e32 v6, v9 -; GFX10-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX10-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX10-NEXT: v_add_nc_u32_e32 v8, -10, v8 ; GFX10-NEXT: v_ldexp_f32 v6, v6, v8 ; GFX10-NEXT: v_mul_f32_e32 v7, v6, v7 @@ -5782,7 +5782,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v4, v3 ; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB9_2 -; GFX11-TRUE16-NEXT: ; %bb.1: ; %frem.else +; GFX11-TRUE16-NEXT: ; %bb.1: ; %frem.else20 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, 0 ; GFX11-TRUE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v4, v3 @@ -5793,7 +5793,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: s_branch .LBB9_8 ; GFX11-TRUE16-NEXT: .LBB9_2: ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2 -; GFX11-TRUE16-NEXT: .LBB9_3: ; %frem.compute +; GFX11-TRUE16-NEXT: .LBB9_3: ; %frem.compute19 ; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v2, v4 ; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v6, v3 ; GFX11-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v5, v4 @@ -5829,11 +5829,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX11-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX11-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX11-TRUE16-NEXT: 
s_sub_i32 s2, s2, s3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 11 -; GFX11-TRUE16-NEXT: .LBB9_5: ; %frem.loop_body +; GFX11-TRUE16-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v4 @@ -5853,7 +5853,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: ; %bb.6: ; %Flow55 ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, s2 ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v7 -; GFX11-TRUE16-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX11-TRUE16-NEXT: .LBB9_7: ; %frem.loop_exit28 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v6, -10, v6 ; GFX11-TRUE16-NEXT: v_ldexp_f32 v4, v4, v6 @@ -5880,7 +5880,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v6, v5 ; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB9_10 -; GFX11-TRUE16-NEXT: ; %bb.9: ; %frem.else20 +; GFX11-TRUE16-NEXT: ; %bb.9: ; %frem.else ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v3.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, 0 ; GFX11-TRUE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v6, v5 @@ -5891,7 +5891,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: s_branch .LBB9_16 ; GFX11-TRUE16-NEXT: .LBB9_10: ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7 -; GFX11-TRUE16-NEXT: .LBB9_11: ; %frem.compute19 +; GFX11-TRUE16-NEXT: .LBB9_11: ; %frem.compute ; GFX11-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v8, v6 ; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v6, v6 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) @@ -5927,11 +5927,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_div_fixup_f32 v8, v8, v6, 1.0 ; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX11-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX11-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX11-TRUE16-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 11 -; GFX11-TRUE16-NEXT: .LBB9_13: ; %frem.loop_body27 +; GFX11-TRUE16-NEXT: .LBB9_13: ; %frem.loop_body ; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v10, v7 @@ -5951,7 +5951,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-TRUE16-NEXT: ; %bb.14: ; %Flow ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v9, s2 ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v10 -; GFX11-TRUE16-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX11-TRUE16-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v9, -10, v9 ; GFX11-TRUE16-NEXT: v_ldexp_f32 v7, v7, v9 @@ -6002,7 +6002,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v4, v3 ; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB9_2 -; GFX11-FAKE16-NEXT: ; %bb.1: ; %frem.else +; 
GFX11-FAKE16-NEXT: ; %bb.1: ; %frem.else20 ; GFX11-FAKE16-NEXT: v_bfi_b32 v2, 0x7fff, 0, v0 ; GFX11-FAKE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v4, v3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) @@ -6011,7 +6011,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: s_branch .LBB9_8 ; GFX11-FAKE16-NEXT: .LBB9_2: ; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2 -; GFX11-FAKE16-NEXT: .LBB9_3: ; %frem.compute +; GFX11-FAKE16-NEXT: .LBB9_3: ; %frem.compute19 ; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v2, v4 ; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v6, v3 ; GFX11-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v5, v4 @@ -6047,11 +6047,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-FAKE16-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX11-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX11-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX11-FAKE16-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 11 -; GFX11-FAKE16-NEXT: .LBB9_5: ; %frem.loop_body +; GFX11-FAKE16-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v4 @@ -6071,7 +6071,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: ; %bb.6: ; %Flow55 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, s2 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v7 -; GFX11-FAKE16-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX11-FAKE16-NEXT: .LBB9_7: ; %frem.loop_exit28 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v6, -10, v6 ; GFX11-FAKE16-NEXT: v_ldexp_f32 v4, v4, v6 @@ -6097,7 +6097,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v7, v5 ; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB9_10 -; GFX11-FAKE16-NEXT: ; %bb.9: ; %frem.else20 +; GFX11-FAKE16-NEXT: ; %bb.9: ; %frem.else ; GFX11-FAKE16-NEXT: v_bfi_b32 v6, 0x7fff, 0, v3 ; GFX11-FAKE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v7, v5 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) @@ -6106,7 +6106,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: s_branch .LBB9_16 ; GFX11-FAKE16-NEXT: .LBB9_10: ; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr6 -; GFX11-FAKE16-NEXT: .LBB9_11: ; %frem.compute19 +; GFX11-FAKE16-NEXT: .LBB9_11: ; %frem.compute ; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v6, v7 ; GFX11-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v8, v7 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4) @@ -6142,11 +6142,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-FAKE16-NEXT: v_div_fixup_f32 v8, v8, v6, 1.0 ; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX11-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX11-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX11-FAKE16-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 11 -; GFX11-FAKE16-NEXT: .LBB9_13: ; 
%frem.loop_body27 +; GFX11-FAKE16-NEXT: .LBB9_13: ; %frem.loop_body ; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v10, v7 @@ -6166,7 +6166,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-FAKE16-NEXT: ; %bb.14: ; %Flow ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, s2 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v10 -; GFX11-FAKE16-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX11-FAKE16-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v9, -10, v9 ; GFX11-FAKE16-NEXT: v_ldexp_f32 v7, v7, v9 @@ -6220,7 +6220,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) ; GFX1150-TRUE16-NEXT: s_cmp_ngt_f32 s6, s5 ; GFX1150-TRUE16-NEXT: s_cbranch_scc0 .LBB9_2 -; GFX1150-TRUE16-NEXT: ; %bb.1: ; %frem.else +; GFX1150-TRUE16-NEXT: ; %bb.1: ; %frem.else20 ; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v0.l, s4 ; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v1.l, 0 ; GFX1150-TRUE16-NEXT: s_cmp_eq_f32 s6, s5 @@ -6232,7 +6232,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: s_branch .LBB9_8 ; GFX1150-TRUE16-NEXT: .LBB9_2: ; GFX1150-TRUE16-NEXT: ; implicit-def: $vgpr0 -; GFX1150-TRUE16-NEXT: .LBB9_3: ; %frem.compute +; GFX1150-TRUE16-NEXT: .LBB9_3: ; %frem.compute19 ; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s5 ; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v0, s6 ; GFX1150-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s6 @@ -6267,11 +6267,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4 ; GFX1150-TRUE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; GFX1150-TRUE16-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX1150-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1150-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX1150-TRUE16-NEXT: s_sub_i32 s5, s6, s5 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-TRUE16-NEXT: s_add_i32 s5, s5, 11 -; GFX1150-TRUE16-NEXT: .LBB9_5: ; %frem.loop_body +; GFX1150-TRUE16-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX1150-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v5, v2 @@ -6293,7 +6293,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: ; %bb.6: ; %Flow55 ; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v4, s5 ; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v2, v5 -; GFX1150-TRUE16-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX1150-TRUE16-NEXT: .LBB9_7: ; %frem.loop_exit28 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-TRUE16-NEXT: v_add_nc_u32_e32 v4, -10, v4 ; GFX1150-TRUE16-NEXT: v_ldexp_f32 v2, v2, v4 @@ -6323,7 +6323,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) ; GFX1150-TRUE16-NEXT: s_cmp_ngt_f32 s8, s7 ; GFX1150-TRUE16-NEXT: s_cbranch_scc0 .LBB9_10 -; GFX1150-TRUE16-NEXT: ; %bb.9: ; %frem.else20 +; GFX1150-TRUE16-NEXT: ; %bb.9: ; %frem.else ; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v1.l, s6 ; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0 ; 
GFX1150-TRUE16-NEXT: s_cmp_eq_f32 s8, s7 @@ -6335,7 +6335,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: s_branch .LBB9_16 ; GFX1150-TRUE16-NEXT: .LBB9_10: ; GFX1150-TRUE16-NEXT: ; implicit-def: $vgpr1 -; GFX1150-TRUE16-NEXT: .LBB9_11: ; %frem.compute19 +; GFX1150-TRUE16-NEXT: .LBB9_11: ; %frem.compute ; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v2, s7 ; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s8 ; GFX1150-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s8 @@ -6370,11 +6370,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5 ; GFX1150-TRUE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; GFX1150-TRUE16-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX1150-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX1150-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX1150-TRUE16-NEXT: s_sub_i32 s7, s8, s7 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-TRUE16-NEXT: s_add_i32 s7, s7, 11 -; GFX1150-TRUE16-NEXT: .LBB9_13: ; %frem.loop_body27 +; GFX1150-TRUE16-NEXT: .LBB9_13: ; %frem.loop_body ; GFX1150-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v6, v3 @@ -6396,7 +6396,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-TRUE16-NEXT: ; %bb.14: ; %Flow ; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v5, s7 ; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v3, v6 -; GFX1150-TRUE16-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX1150-TRUE16-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-TRUE16-NEXT: v_add_nc_u32_e32 v5, -10, v5 ; GFX1150-TRUE16-NEXT: v_ldexp_f32 v3, v3, v5 @@ -6459,7 +6459,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) ; GFX1150-FAKE16-NEXT: s_cmp_ngt_f32 s6, s5 ; GFX1150-FAKE16-NEXT: s_cbranch_scc0 .LBB9_2 -; GFX1150-FAKE16-NEXT: ; %bb.1: ; %frem.else +; GFX1150-FAKE16-NEXT: ; %bb.1: ; %frem.else20 ; GFX1150-FAKE16-NEXT: s_cmp_eq_f32 s6, s5 ; GFX1150-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, 0, s4 ; GFX1150-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -6469,7 +6469,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: s_branch .LBB9_8 ; GFX1150-FAKE16-NEXT: .LBB9_2: ; GFX1150-FAKE16-NEXT: ; implicit-def: $vgpr0 -; GFX1150-FAKE16-NEXT: .LBB9_3: ; %frem.compute +; GFX1150-FAKE16-NEXT: .LBB9_3: ; %frem.compute19 ; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s5 ; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v0, s6 ; GFX1150-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s6 @@ -6504,11 +6504,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4 ; GFX1150-FAKE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; GFX1150-FAKE16-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX1150-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1150-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX1150-FAKE16-NEXT: s_sub_i32 s5, s6, s5 ; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-FAKE16-NEXT: s_add_i32 s5, s5, 11 -; GFX1150-FAKE16-NEXT: .LBB9_5: ; %frem.loop_body +; GFX1150-FAKE16-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX1150-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; 
GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v5, v2 @@ -6530,7 +6530,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: ; %bb.6: ; %Flow55 ; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v4, s5 ; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v2, v5 -; GFX1150-FAKE16-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX1150-FAKE16-NEXT: .LBB9_7: ; %frem.loop_exit28 ; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-FAKE16-NEXT: v_add_nc_u32_e32 v4, -10, v4 ; GFX1150-FAKE16-NEXT: v_ldexp_f32 v2, v2, v4 @@ -6559,7 +6559,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) ; GFX1150-FAKE16-NEXT: s_cmp_ngt_f32 s8, s7 ; GFX1150-FAKE16-NEXT: s_cbranch_scc0 .LBB9_10 -; GFX1150-FAKE16-NEXT: ; %bb.9: ; %frem.else20 +; GFX1150-FAKE16-NEXT: ; %bb.9: ; %frem.else ; GFX1150-FAKE16-NEXT: s_cmp_eq_f32 s8, s7 ; GFX1150-FAKE16-NEXT: v_bfi_b32 v1, 0x7fff, 0, s6 ; GFX1150-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -6569,7 +6569,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: s_branch .LBB9_16 ; GFX1150-FAKE16-NEXT: .LBB9_10: ; GFX1150-FAKE16-NEXT: ; implicit-def: $vgpr1 -; GFX1150-FAKE16-NEXT: .LBB9_11: ; %frem.compute19 +; GFX1150-FAKE16-NEXT: .LBB9_11: ; %frem.compute ; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v2, s7 ; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s8 ; GFX1150-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s8 @@ -6604,11 +6604,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5 ; GFX1150-FAKE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; GFX1150-FAKE16-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX1150-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX1150-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX1150-FAKE16-NEXT: s_sub_i32 s7, s8, s7 ; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-FAKE16-NEXT: s_add_i32 s7, s7, 11 -; GFX1150-FAKE16-NEXT: .LBB9_13: ; %frem.loop_body27 +; GFX1150-FAKE16-NEXT: .LBB9_13: ; %frem.loop_body ; GFX1150-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v6, v3 @@ -6630,7 +6630,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-FAKE16-NEXT: ; %bb.14: ; %Flow ; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v5, s7 ; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v3, v6 -; GFX1150-FAKE16-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX1150-FAKE16-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-FAKE16-NEXT: v_add_nc_u32_e32 v5, -10, v5 ; GFX1150-FAKE16-NEXT: v_ldexp_f32 v3, v3, v5 @@ -6690,7 +6690,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) ; GFX1200-TRUE16-NEXT: s_cmp_ngt_f32 s6, s5 ; GFX1200-TRUE16-NEXT: s_cbranch_scc0 .LBB9_2 -; GFX1200-TRUE16-NEXT: ; %bb.1: ; %frem.else +; GFX1200-TRUE16-NEXT: ; %bb.1: ; %frem.else20 ; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v0.l, s4 ; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v1.l, 0 ; GFX1200-TRUE16-NEXT: s_cmp_eq_f32 s6, s5 @@ -6702,7 +6702,7 @@ define amdgpu_kernel void @frem_v2f16(ptr 
addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: s_branch .LBB9_8 ; GFX1200-TRUE16-NEXT: .LBB9_2: ; GFX1200-TRUE16-NEXT: ; implicit-def: $vgpr0 -; GFX1200-TRUE16-NEXT: .LBB9_3: ; %frem.compute +; GFX1200-TRUE16-NEXT: .LBB9_3: ; %frem.compute19 ; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s5 ; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v0, s6 ; GFX1200-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s6 @@ -6737,11 +6737,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4 ; GFX1200-TRUE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; GFX1200-TRUE16-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX1200-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1200-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX1200-TRUE16-NEXT: s_sub_co_i32 s5, s6, s5 ; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX1200-TRUE16-NEXT: s_add_co_i32 s5, s5, 11 -; GFX1200-TRUE16-NEXT: .LBB9_5: ; %frem.loop_body +; GFX1200-TRUE16-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX1200-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v5, v2 @@ -6765,7 +6765,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: ; %bb.6: ; %Flow55 ; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v4, s5 ; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v2, v5 -; GFX1200-TRUE16-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX1200-TRUE16-NEXT: .LBB9_7: ; %frem.loop_exit28 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-TRUE16-NEXT: v_add_nc_u32_e32 v4, -10, v4 ; GFX1200-TRUE16-NEXT: v_ldexp_f32 v2, v2, v4 @@ -6799,7 +6799,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2) ; GFX1200-TRUE16-NEXT: s_cmp_ngt_f32 s8, s7 ; GFX1200-TRUE16-NEXT: s_cbranch_scc0 .LBB9_10 -; GFX1200-TRUE16-NEXT: ; %bb.9: ; %frem.else20 +; GFX1200-TRUE16-NEXT: ; %bb.9: ; %frem.else ; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v1.l, s6 ; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0 ; GFX1200-TRUE16-NEXT: s_cmp_eq_f32 s8, s7 @@ -6811,7 +6811,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: s_branch .LBB9_16 ; GFX1200-TRUE16-NEXT: .LBB9_10: ; GFX1200-TRUE16-NEXT: ; implicit-def: $vgpr1 -; GFX1200-TRUE16-NEXT: .LBB9_11: ; %frem.compute19 +; GFX1200-TRUE16-NEXT: .LBB9_11: ; %frem.compute ; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v2, s7 ; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s8 ; GFX1200-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s8 @@ -6847,11 +6847,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5 ; GFX1200-TRUE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; GFX1200-TRUE16-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX1200-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX1200-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX1200-TRUE16-NEXT: s_sub_co_i32 s7, s8, s7 ; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX1200-TRUE16-NEXT: s_add_co_i32 s7, s7, 11 -; GFX1200-TRUE16-NEXT: .LBB9_13: ; %frem.loop_body27 +; GFX1200-TRUE16-NEXT: .LBB9_13: ; %frem.loop_body ; GFX1200-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v6, v3 @@ -6875,7 +6875,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) 
%out, ptr addrspace(1) %i ; GFX1200-TRUE16-NEXT: ; %bb.14: ; %Flow ; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v5, s7 ; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v3, v6 -; GFX1200-TRUE16-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX1200-TRUE16-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-TRUE16-NEXT: v_add_nc_u32_e32 v5, -10, v5 ; GFX1200-TRUE16-NEXT: v_ldexp_f32 v3, v3, v5 @@ -6940,7 +6940,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) ; GFX1200-FAKE16-NEXT: s_cmp_ngt_f32 s6, s5 ; GFX1200-FAKE16-NEXT: s_cbranch_scc0 .LBB9_2 -; GFX1200-FAKE16-NEXT: ; %bb.1: ; %frem.else +; GFX1200-FAKE16-NEXT: ; %bb.1: ; %frem.else20 ; GFX1200-FAKE16-NEXT: s_cmp_eq_f32 s6, s5 ; GFX1200-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, 0, s4 ; GFX1200-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -6950,7 +6950,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: s_branch .LBB9_8 ; GFX1200-FAKE16-NEXT: .LBB9_2: ; GFX1200-FAKE16-NEXT: ; implicit-def: $vgpr0 -; GFX1200-FAKE16-NEXT: .LBB9_3: ; %frem.compute +; GFX1200-FAKE16-NEXT: .LBB9_3: ; %frem.compute19 ; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s5 ; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v0, s6 ; GFX1200-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s6 @@ -6986,11 +6986,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4 ; GFX1200-FAKE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; GFX1200-FAKE16-NEXT: s_cbranch_vccnz .LBB9_7 -; GFX1200-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1200-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body27.preheader ; GFX1200-FAKE16-NEXT: s_sub_co_i32 s5, s6, s5 ; GFX1200-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX1200-FAKE16-NEXT: s_add_co_i32 s5, s5, 11 -; GFX1200-FAKE16-NEXT: .LBB9_5: ; %frem.loop_body +; GFX1200-FAKE16-NEXT: .LBB9_5: ; %frem.loop_body27 ; GFX1200-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v5, v2 @@ -7014,7 +7014,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: ; %bb.6: ; %Flow55 ; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v4, s5 ; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v2, v5 -; GFX1200-FAKE16-NEXT: .LBB9_7: ; %frem.loop_exit +; GFX1200-FAKE16-NEXT: .LBB9_7: ; %frem.loop_exit28 ; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-FAKE16-NEXT: v_add_nc_u32_e32 v4, -10, v4 ; GFX1200-FAKE16-NEXT: v_ldexp_f32 v2, v2, v4 @@ -7047,7 +7047,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2) ; GFX1200-FAKE16-NEXT: s_cmp_ngt_f32 s8, s7 ; GFX1200-FAKE16-NEXT: s_cbranch_scc0 .LBB9_10 -; GFX1200-FAKE16-NEXT: ; %bb.9: ; %frem.else20 +; GFX1200-FAKE16-NEXT: ; %bb.9: ; %frem.else ; GFX1200-FAKE16-NEXT: s_cmp_eq_f32 s8, s7 ; GFX1200-FAKE16-NEXT: v_bfi_b32 v1, 0x7fff, 0, s6 ; GFX1200-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -7058,7 +7058,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: s_branch .LBB9_16 ; GFX1200-FAKE16-NEXT: .LBB9_10: ; GFX1200-FAKE16-NEXT: ; implicit-def: $vgpr1 -; GFX1200-FAKE16-NEXT: .LBB9_11: ; %frem.compute19 +; GFX1200-FAKE16-NEXT: .LBB9_11: ; %frem.compute ; 
GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v2, s7 ; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s8 ; GFX1200-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s8 @@ -7094,11 +7094,11 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5 ; GFX1200-FAKE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; GFX1200-FAKE16-NEXT: s_cbranch_vccnz .LBB9_15 -; GFX1200-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; GFX1200-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX1200-FAKE16-NEXT: s_sub_co_i32 s7, s8, s7 ; GFX1200-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX1200-FAKE16-NEXT: s_add_co_i32 s7, s7, 11 -; GFX1200-FAKE16-NEXT: .LBB9_13: ; %frem.loop_body27 +; GFX1200-FAKE16-NEXT: .LBB9_13: ; %frem.loop_body ; GFX1200-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v6, v3 @@ -7122,7 +7122,7 @@ define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-FAKE16-NEXT: ; %bb.14: ; %Flow ; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v5, s7 ; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v3, v6 -; GFX1200-FAKE16-NEXT: .LBB9_15: ; %frem.loop_exit28 +; GFX1200-FAKE16-NEXT: .LBB9_15: ; %frem.loop_exit ; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-FAKE16-NEXT: v_add_nc_u32_e32 v5, -10, v5 ; GFX1200-FAKE16-NEXT: v_ldexp_f32 v3, v3, v5 @@ -7208,7 +7208,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: s_and_b64 vcc, exec, s[2:3] ; SI-NEXT: v_cvt_f16_f32_e32 v8, v6 ; SI-NEXT: s_cbranch_vccz .LBB10_2 -; SI-NEXT: ; %bb.1: ; %frem.else +; SI-NEXT: ; %bb.1: ; %frem.else86 ; SI-NEXT: v_bfi_b32 v11, s0, 0, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cmp_eq_f32_e32 vcc, v9, v10 @@ -7219,7 +7219,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: .LBB10_2: ; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: s_mov_b64 vcc, 0 -; SI-NEXT: .LBB10_3: ; %frem.compute +; SI-NEXT: .LBB10_3: ; %frem.compute85 ; SI-NEXT: s_mov_b32 s3, 0x7f800000 ; SI-NEXT: v_cmp_lt_f32_e64 vcc, |v9|, s3 ; SI-NEXT: v_frexp_exp_i32_f32_e32 v8, v9 @@ -7255,10 +7255,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_div_fixup_f32 v10, v10, v8, 1.0 ; SI-NEXT: s_cmp_lt_i32 s1, 12 ; SI-NEXT: s_cbranch_scc1 .LBB10_7 -; SI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; SI-NEXT: ; %bb.4: ; %frem.loop_body93.preheader ; SI-NEXT: s_sub_i32 s1, s2, s3 ; SI-NEXT: s_add_i32 s1, s1, 11 -; SI-NEXT: .LBB10_5: ; %frem.loop_body +; SI-NEXT: .LBB10_5: ; %frem.loop_body93 ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: v_mov_b32_e32 v11, v9 ; SI-NEXT: v_mul_f32_e32 v9, v11, v10 @@ -7273,7 +7273,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: s_cbranch_scc1 .LBB10_5 ; SI-NEXT: ; %bb.6: ; %Flow133 ; SI-NEXT: v_mov_b32_e32 v9, v11 -; SI-NEXT: .LBB10_7: ; %frem.loop_exit +; SI-NEXT: .LBB10_7: ; %frem.loop_exit94 ; SI-NEXT: s_add_i32 s1, s1, -10 ; SI-NEXT: v_ldexp_f32_e64 v9, v9, s1 ; SI-NEXT: v_mul_f32_e32 v10, v9, v10 @@ -7294,7 +7294,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_cvt_f32_f16_e64 v11, |v11| ; SI-NEXT: v_cmp_ngt_f32_e32 vcc, v10, v11 ; SI-NEXT: s_cbranch_vccz .LBB10_10 -; SI-NEXT: ; %bb.9: ; %frem.else20 +; SI-NEXT: ; %bb.9: ; %frem.else53 ; SI-NEXT: s_brev_b32 s0, -2 ; 
SI-NEXT: v_bfi_b32 v12, s0, 0, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 @@ -7306,7 +7306,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: .LBB10_10: ; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: s_mov_b64 vcc, 0 -; SI-NEXT: .LBB10_11: ; %frem.compute19 +; SI-NEXT: .LBB10_11: ; %frem.compute52 ; SI-NEXT: s_mov_b32 s3, 0x7f800000 ; SI-NEXT: v_cmp_lt_f32_e64 vcc, |v10|, s3 ; SI-NEXT: v_frexp_exp_i32_f32_e32 v9, v10 @@ -7342,10 +7342,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_div_fixup_f32 v11, v11, v9, 1.0 ; SI-NEXT: s_cmp_lt_i32 s1, 12 ; SI-NEXT: s_cbranch_scc1 .LBB10_15 -; SI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader +; SI-NEXT: ; %bb.12: ; %frem.loop_body60.preheader ; SI-NEXT: s_sub_i32 s1, s2, s3 ; SI-NEXT: s_add_i32 s1, s1, 11 -; SI-NEXT: .LBB10_13: ; %frem.loop_body27 +; SI-NEXT: .LBB10_13: ; %frem.loop_body60 ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: v_mov_b32_e32 v12, v10 ; SI-NEXT: v_mul_f32_e32 v10, v12, v11 @@ -7360,7 +7360,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: s_cbranch_scc1 .LBB10_13 ; SI-NEXT: ; %bb.14: ; %Flow129 ; SI-NEXT: v_mov_b32_e32 v10, v12 -; SI-NEXT: .LBB10_15: ; %frem.loop_exit28 +; SI-NEXT: .LBB10_15: ; %frem.loop_exit61 ; SI-NEXT: s_add_i32 s1, s1, -10 ; SI-NEXT: v_ldexp_f32_e64 v10, v10, s1 ; SI-NEXT: v_mul_f32_e32 v11, v10, v11 @@ -7381,7 +7381,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_cvt_f32_f16_e64 v12, |v12| ; SI-NEXT: v_cmp_ngt_f32_e32 vcc, v11, v12 ; SI-NEXT: s_cbranch_vccz .LBB10_18 -; SI-NEXT: ; %bb.17: ; %frem.else53 +; SI-NEXT: ; %bb.17: ; %frem.else20 ; SI-NEXT: s_brev_b32 s0, -2 ; SI-NEXT: v_bfi_b32 v13, s0, 0, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 @@ -7393,7 +7393,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: .LBB10_18: ; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: s_mov_b64 vcc, 0 -; SI-NEXT: .LBB10_19: ; %frem.compute52 +; SI-NEXT: .LBB10_19: ; %frem.compute19 ; SI-NEXT: s_mov_b32 s3, 0x7f800000 ; SI-NEXT: v_cmp_lt_f32_e64 vcc, |v11|, s3 ; SI-NEXT: v_frexp_exp_i32_f32_e32 v10, v11 @@ -7429,10 +7429,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_div_fixup_f32 v12, v12, v10, 1.0 ; SI-NEXT: s_cmp_lt_i32 s1, 12 ; SI-NEXT: s_cbranch_scc1 .LBB10_23 -; SI-NEXT: ; %bb.20: ; %frem.loop_body60.preheader +; SI-NEXT: ; %bb.20: ; %frem.loop_body27.preheader ; SI-NEXT: s_sub_i32 s1, s2, s3 ; SI-NEXT: s_add_i32 s1, s1, 11 -; SI-NEXT: .LBB10_21: ; %frem.loop_body60 +; SI-NEXT: .LBB10_21: ; %frem.loop_body27 ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: v_mov_b32_e32 v13, v11 ; SI-NEXT: v_mul_f32_e32 v11, v13, v12 @@ -7447,7 +7447,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: s_cbranch_scc1 .LBB10_21 ; SI-NEXT: ; %bb.22: ; %Flow125 ; SI-NEXT: v_mov_b32_e32 v11, v13 -; SI-NEXT: .LBB10_23: ; %frem.loop_exit61 +; SI-NEXT: .LBB10_23: ; %frem.loop_exit28 ; SI-NEXT: s_add_i32 s1, s1, -10 ; SI-NEXT: v_ldexp_f32_e64 v11, v11, s1 ; SI-NEXT: v_mul_f32_e32 v12, v11, v12 @@ -7468,7 +7468,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_cvt_f32_f16_e64 v13, |v13| ; SI-NEXT: v_cmp_ngt_f32_e32 vcc, v12, v13 ; SI-NEXT: s_cbranch_vccz .LBB10_26 -; SI-NEXT: ; %bb.25: ; %frem.else86 +; SI-NEXT: ; %bb.25: ; %frem.else ; SI-NEXT: 
s_brev_b32 s0, -2 ; SI-NEXT: v_bfi_b32 v14, s0, 0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 @@ -7480,7 +7480,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: .LBB10_26: ; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: s_mov_b64 vcc, 0 -; SI-NEXT: .LBB10_27: ; %frem.compute85 +; SI-NEXT: .LBB10_27: ; %frem.compute ; SI-NEXT: s_mov_b32 s3, 0x7f800000 ; SI-NEXT: v_cmp_lt_f32_e64 vcc, |v12|, s3 ; SI-NEXT: v_frexp_exp_i32_f32_e32 v11, v12 @@ -7516,10 +7516,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_div_fixup_f32 v13, v13, v11, 1.0 ; SI-NEXT: s_cmp_lt_i32 s1, 12 ; SI-NEXT: s_cbranch_scc1 .LBB10_31 -; SI-NEXT: ; %bb.28: ; %frem.loop_body93.preheader +; SI-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; SI-NEXT: s_sub_i32 s1, s2, s3 ; SI-NEXT: s_add_i32 s1, s1, 11 -; SI-NEXT: .LBB10_29: ; %frem.loop_body93 +; SI-NEXT: .LBB10_29: ; %frem.loop_body ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: v_mov_b32_e32 v14, v12 ; SI-NEXT: v_mul_f32_e32 v12, v14, v13 @@ -7534,7 +7534,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: s_cbranch_scc1 .LBB10_29 ; SI-NEXT: ; %bb.30: ; %Flow ; SI-NEXT: v_mov_b32_e32 v12, v14 -; SI-NEXT: .LBB10_31: ; %frem.loop_exit94 +; SI-NEXT: .LBB10_31: ; %frem.loop_exit ; SI-NEXT: s_add_i32 s1, s1, -10 ; SI-NEXT: v_ldexp_f32_e64 v12, v12, s1 ; SI-NEXT: v_mul_f32_e32 v13, v12, v13 @@ -7638,7 +7638,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_and_b32_e32 v9, 0x7fffffff, v7 ; CI-NEXT: s_and_b64 vcc, exec, s[2:3] ; CI-NEXT: s_cbranch_vccz .LBB10_2 -; CI-NEXT: ; %bb.1: ; %frem.else +; CI-NEXT: ; %bb.1: ; %frem.else86 ; CI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; CI-NEXT: v_bfi_b32 v11, s0, 0, v6 ; CI-NEXT: v_cmp_eq_f32_e32 vcc, v10, v9 @@ -7647,7 +7647,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB10_8 ; CI-NEXT: .LBB10_2: ; CI-NEXT: ; implicit-def: $vgpr8 -; CI-NEXT: .LBB10_3: ; %frem.compute +; CI-NEXT: .LBB10_3: ; %frem.compute85 ; CI-NEXT: v_frexp_exp_i32_f32_e32 v13, v10 ; CI-NEXT: v_frexp_mant_f32_e32 v8, v10 ; CI-NEXT: v_frexp_mant_f32_e32 v10, v9 @@ -7672,10 +7672,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v10 ; CI-NEXT: v_div_fixup_f32 v12, v12, v9, 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB10_7 -; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; CI-NEXT: ; %bb.4: ; %frem.loop_body93.preheader ; CI-NEXT: v_sub_i32_e32 v10, vcc, v13, v14 ; CI-NEXT: v_add_i32_e32 v10, vcc, 11, v10 -; CI-NEXT: .LBB10_5: ; %frem.loop_body +; CI-NEXT: .LBB10_5: ; %frem.loop_body93 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v13, v11 ; CI-NEXT: v_mul_f32_e32 v11, v13, v12 @@ -7690,7 +7690,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_cbranch_vccnz .LBB10_5 ; CI-NEXT: ; %bb.6: ; %Flow133 ; CI-NEXT: v_mov_b32_e32 v11, v13 -; CI-NEXT: .LBB10_7: ; %frem.loop_exit +; CI-NEXT: .LBB10_7: ; %frem.loop_exit94 ; CI-NEXT: v_add_i32_e32 v10, vcc, -10, v10 ; CI-NEXT: v_ldexp_f32_e32 v10, v11, v10 ; CI-NEXT: v_mul_f32_e32 v11, v10, v12 @@ -7711,7 +7711,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cvt_f32_f16_e64 v10, |v10| ; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v11, v10 ; CI-NEXT: s_cbranch_vccz .LBB10_10 -; CI-NEXT: ; %bb.9: ; %frem.else20 +; 
CI-NEXT: ; %bb.9: ; %frem.else53
; CI-NEXT: v_cvt_f32_f16_e32 v9, v9
; CI-NEXT: s_brev_b32 s0, -2
; CI-NEXT: v_bfi_b32 v12, s0, 0, v4
@@ -7721,7 +7721,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB10_16
; CI-NEXT: .LBB10_10:
; CI-NEXT: ; implicit-def: $vgpr9
-; CI-NEXT: .LBB10_11: ; %frem.compute19
+; CI-NEXT: .LBB10_11: ; %frem.compute52
; CI-NEXT: v_frexp_exp_i32_f32_e32 v14, v11
; CI-NEXT: v_frexp_mant_f32_e32 v9, v11
; CI-NEXT: v_frexp_mant_f32_e32 v11, v10
@@ -7746,10 +7746,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v11
; CI-NEXT: v_div_fixup_f32 v13, v13, v10, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB10_15
-; CI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; CI-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; CI-NEXT: v_sub_i32_e32 v11, vcc, v14, v15
; CI-NEXT: v_add_i32_e32 v11, vcc, 11, v11
-; CI-NEXT: .LBB10_13: ; %frem.loop_body27
+; CI-NEXT: .LBB10_13: ; %frem.loop_body60
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v14, v12
; CI-NEXT: v_mul_f32_e32 v12, v14, v13
@@ -7764,7 +7764,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB10_13
; CI-NEXT: ; %bb.14: ; %Flow129
; CI-NEXT: v_mov_b32_e32 v12, v14
-; CI-NEXT: .LBB10_15: ; %frem.loop_exit28
+; CI-NEXT: .LBB10_15: ; %frem.loop_exit61
; CI-NEXT: v_add_i32_e32 v11, vcc, -10, v11
; CI-NEXT: v_ldexp_f32_e32 v11, v12, v11
; CI-NEXT: v_mul_f32_e32 v12, v11, v13
@@ -7785,7 +7785,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cvt_f32_f16_e64 v11, |v11|
; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v12, v11
; CI-NEXT: s_cbranch_vccz .LBB10_18
-; CI-NEXT: ; %bb.17: ; %frem.else53
+; CI-NEXT: ; %bb.17: ; %frem.else20
; CI-NEXT: v_cvt_f32_f16_e32 v10, v10
; CI-NEXT: s_brev_b32 s0, -2
; CI-NEXT: v_bfi_b32 v13, s0, 0, v2
@@ -7795,7 +7795,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB10_24
; CI-NEXT: .LBB10_18:
; CI-NEXT: ; implicit-def: $vgpr10
-; CI-NEXT: .LBB10_19: ; %frem.compute52
+; CI-NEXT: .LBB10_19: ; %frem.compute19
; CI-NEXT: v_frexp_exp_i32_f32_e32 v15, v12
; CI-NEXT: v_frexp_mant_f32_e32 v10, v12
; CI-NEXT: v_frexp_mant_f32_e32 v12, v11
@@ -7820,10 +7820,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v12
; CI-NEXT: v_div_fixup_f32 v14, v14, v11, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB10_23
-; CI-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; CI-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; CI-NEXT: v_sub_i32_e32 v12, vcc, v15, v16
; CI-NEXT: v_add_i32_e32 v12, vcc, 11, v12
-; CI-NEXT: .LBB10_21: ; %frem.loop_body60
+; CI-NEXT: .LBB10_21: ; %frem.loop_body27
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v15, v13
; CI-NEXT: v_mul_f32_e32 v13, v15, v14
@@ -7838,7 +7838,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB10_21
; CI-NEXT: ; %bb.22: ; %Flow125
; CI-NEXT: v_mov_b32_e32 v13, v15
-; CI-NEXT: .LBB10_23: ; %frem.loop_exit61
+; CI-NEXT: .LBB10_23: ; %frem.loop_exit28
; CI-NEXT: v_add_i32_e32 v12, vcc, -10, v12
; CI-NEXT: v_ldexp_f32_e32 v12, v13, v12
; CI-NEXT: v_mul_f32_e32 v13, v12, v14
@@ -7859,7 +7859,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cvt_f32_f16_e64 v12, |v12|
; CI-NEXT: v_cmp_ngt_f32_e32 vcc, v13, v12
; CI-NEXT: s_cbranch_vccz .LBB10_26
-; CI-NEXT: ; %bb.25: ; %frem.else86
+; CI-NEXT: ; %bb.25: ; %frem.else
; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
; CI-NEXT: s_brev_b32 s0, -2
; CI-NEXT: v_bfi_b32 v14, s0, 0, v0
@@ -7869,7 +7869,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB10_32
; CI-NEXT: .LBB10_26:
; CI-NEXT: ; implicit-def: $vgpr11
-; CI-NEXT: .LBB10_27: ; %frem.compute85
+; CI-NEXT: .LBB10_27: ; %frem.compute
; CI-NEXT: v_frexp_exp_i32_f32_e32 v16, v13
; CI-NEXT: v_frexp_mant_f32_e32 v11, v13
; CI-NEXT: v_frexp_mant_f32_e32 v13, v12
@@ -7894,10 +7894,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v13
; CI-NEXT: v_div_fixup_f32 v15, v15, v12, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB10_31
-; CI-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; CI-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; CI-NEXT: v_sub_i32_e32 v13, vcc, v16, v17
; CI-NEXT: v_add_i32_e32 v13, vcc, 11, v13
-; CI-NEXT: .LBB10_29: ; %frem.loop_body93
+; CI-NEXT: .LBB10_29: ; %frem.loop_body
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v16, v14
; CI-NEXT: v_mul_f32_e32 v14, v16, v15
@@ -7912,7 +7912,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB10_29
; CI-NEXT: ; %bb.30: ; %Flow
; CI-NEXT: v_mov_b32_e32 v14, v16
-; CI-NEXT: .LBB10_31: ; %frem.loop_exit94
+; CI-NEXT: .LBB10_31: ; %frem.loop_exit
; CI-NEXT: v_add_i32_e32 v13, vcc, -10, v13
; CI-NEXT: v_ldexp_f32_e32 v13, v14, v13
; CI-NEXT: v_mul_f32_e32 v14, v13, v15
@@ -8001,7 +8001,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cvt_f32_f16_e64 v5, |v2|
; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v6, v5
; VI-NEXT: s_cbranch_vccz .LBB10_2
-; VI-NEXT: ; %bb.1: ; %frem.else
+; VI-NEXT: ; %bb.1: ; %frem.else86
; VI-NEXT: s_movk_i32 s2, 0x7fff
; VI-NEXT: v_bfi_b32 v4, s2, 0, v0
; VI-NEXT: v_cmp_eq_f32_e32 vcc, v6, v5
@@ -8010,7 +8010,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB10_8
; VI-NEXT: .LBB10_2:
; VI-NEXT: ; implicit-def: $vgpr4
-; VI-NEXT: .LBB10_3: ; %frem.compute
+; VI-NEXT: .LBB10_3: ; %frem.compute85
; VI-NEXT: v_frexp_exp_i32_f32_e32 v9, v6
; VI-NEXT: v_frexp_mant_f32_e32 v4, v6
; VI-NEXT: v_frexp_mant_f32_e32 v6, v5
@@ -8035,10 +8035,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v6
; VI-NEXT: v_div_fixup_f32 v8, v8, v5, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB10_7
-; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; VI-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; VI-NEXT: v_sub_u32_e32 v6, vcc, v9, v10
; VI-NEXT: v_add_u32_e32 v6, vcc, 11, v6
-; VI-NEXT: .LBB10_5: ; %frem.loop_body
+; VI-NEXT: .LBB10_5: ; %frem.loop_body93
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v7
; VI-NEXT: v_mul_f32_e32 v7, v9, v8
@@ -8053,7 +8053,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB10_5
; VI-NEXT: ; %bb.6: ; %Flow133
; VI-NEXT: v_mov_b32_e32 v7, v9
-; VI-NEXT: .LBB10_7: ; %frem.loop_exit
+; VI-NEXT: .LBB10_7: ; %frem.loop_exit94
; VI-NEXT: v_add_u32_e32 v6, vcc, -10, v6
; VI-NEXT: v_ldexp_f32 v6, v7, v6
; VI-NEXT: v_mul_f32_e32 v7, v6, v8
@@ -8073,7 +8073,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cvt_f32_f16_e64 v8, |v6|
; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v9, v8
; VI-NEXT: s_cbranch_vccz .LBB10_10
-; VI-NEXT: ; %bb.9: ; %frem.else20
+; VI-NEXT: ; %bb.9: ; %frem.else53
; VI-NEXT: s_movk_i32 s2, 0x7fff
; VI-NEXT: v_bfi_b32 v7, s2, 0, v5
; VI-NEXT: v_cmp_eq_f32_e32 vcc, v9, v8
@@ -8082,7 +8082,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB10_16
; VI-NEXT: .LBB10_10:
; VI-NEXT: ; implicit-def: $vgpr7
-; VI-NEXT: .LBB10_11: ; %frem.compute19
+; VI-NEXT: .LBB10_11: ; %frem.compute52
; VI-NEXT: v_frexp_exp_i32_f32_e32 v12, v9
; VI-NEXT: v_frexp_mant_f32_e32 v7, v9
; VI-NEXT: v_frexp_mant_f32_e32 v9, v8
@@ -8107,10 +8107,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v9
; VI-NEXT: v_div_fixup_f32 v11, v11, v8, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB10_15
-; VI-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; VI-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; VI-NEXT: v_sub_u32_e32 v9, vcc, v12, v13
; VI-NEXT: v_add_u32_e32 v9, vcc, 11, v9
-; VI-NEXT: .LBB10_13: ; %frem.loop_body27
+; VI-NEXT: .LBB10_13: ; %frem.loop_body60
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v12, v10
; VI-NEXT: v_mul_f32_e32 v10, v12, v11
@@ -8125,7 +8125,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB10_13
; VI-NEXT: ; %bb.14: ; %Flow129
; VI-NEXT: v_mov_b32_e32 v10, v12
-; VI-NEXT: .LBB10_15: ; %frem.loop_exit28
+; VI-NEXT: .LBB10_15: ; %frem.loop_exit61
; VI-NEXT: v_add_u32_e32 v9, vcc, -10, v9
; VI-NEXT: v_ldexp_f32 v9, v10, v9
; VI-NEXT: v_mul_f32_e32 v10, v9, v11
@@ -8143,7 +8143,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cvt_f32_f16_e64 v9, |v3|
; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v10, v9
; VI-NEXT: s_cbranch_vccz .LBB10_18
-; VI-NEXT: ; %bb.17: ; %frem.else53
+; VI-NEXT: ; %bb.17: ; %frem.else20
; VI-NEXT: s_movk_i32 s2, 0x7fff
; VI-NEXT: v_bfi_b32 v8, s2, 0, v1
; VI-NEXT: v_cmp_eq_f32_e32 vcc, v10, v9
@@ -8152,7 +8152,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB10_24
; VI-NEXT: .LBB10_18:
; VI-NEXT: ; implicit-def: $vgpr8
-; VI-NEXT: .LBB10_19: ; %frem.compute52
+; VI-NEXT: .LBB10_19: ; %frem.compute19
; VI-NEXT: v_frexp_exp_i32_f32_e32 v13, v10
; VI-NEXT: v_frexp_mant_f32_e32 v8, v10
; VI-NEXT: v_frexp_mant_f32_e32 v10, v9
@@ -8177,10 +8177,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v10
; VI-NEXT: v_div_fixup_f32 v12, v12, v9, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB10_23
-; VI-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; VI-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; VI-NEXT: v_sub_u32_e32 v10, vcc, v13, v14
; VI-NEXT: v_add_u32_e32 v10, vcc, 11, v10
-; VI-NEXT: .LBB10_21: ; %frem.loop_body60
+; VI-NEXT: .LBB10_21: ; %frem.loop_body27
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v13, v11
; VI-NEXT: v_mul_f32_e32 v11, v13, v12
@@ -8195,7 +8195,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB10_21
; VI-NEXT: ; %bb.22: ; %Flow125
; VI-NEXT: v_mov_b32_e32 v11, v13
-; VI-NEXT: .LBB10_23: ; %frem.loop_exit61
+; VI-NEXT: .LBB10_23: ; %frem.loop_exit28
; VI-NEXT: v_add_u32_e32 v10, vcc, -10, v10
; VI-NEXT: v_ldexp_f32 v10, v11, v10
; VI-NEXT: v_mul_f32_e32 v11, v10, v12
@@ -8215,7 +8215,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cvt_f32_f16_e64 v12, |v10|
; VI-NEXT: v_cmp_ngt_f32_e32 vcc, v13, v12
; VI-NEXT: s_cbranch_vccz .LBB10_26
-; VI-NEXT: ; %bb.25: ; %frem.else86
+; VI-NEXT: ; %bb.25: ; %frem.else
; VI-NEXT: s_movk_i32 s2, 0x7fff
; VI-NEXT: v_bfi_b32 v11, s2, 0, v9
; VI-NEXT: v_cmp_eq_f32_e32 vcc, v13, v12
@@ -8224,7 +8224,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB10_32
; VI-NEXT: .LBB10_26:
; VI-NEXT: ; implicit-def: $vgpr11
-; VI-NEXT: .LBB10_27: ; %frem.compute85
+; VI-NEXT: .LBB10_27: ; %frem.compute
; VI-NEXT: v_frexp_exp_i32_f32_e32 v16, v13
; VI-NEXT: v_frexp_mant_f32_e32 v11, v13
; VI-NEXT: v_frexp_mant_f32_e32 v13, v12
@@ -8249,10 +8249,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 12, v13
; VI-NEXT: v_div_fixup_f32 v15, v15, v12, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB10_31
-; VI-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; VI-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; VI-NEXT: v_sub_u32_e32 v13, vcc, v16, v17
; VI-NEXT: v_add_u32_e32 v13, vcc, 11, v13
-; VI-NEXT: .LBB10_29: ; %frem.loop_body93
+; VI-NEXT: .LBB10_29: ; %frem.loop_body
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v16, v14
; VI-NEXT: v_mul_f32_e32 v14, v16, v15
@@ -8267,7 +8267,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB10_29
; VI-NEXT: ; %bb.30: ; %Flow
; VI-NEXT: v_mov_b32_e32 v14, v16
-; VI-NEXT: .LBB10_31: ; %frem.loop_exit94
+; VI-NEXT: .LBB10_31: ; %frem.loop_exit
; VI-NEXT: v_add_u32_e32 v13, vcc, -10, v13
; VI-NEXT: v_ldexp_f32 v13, v14, v13
; VI-NEXT: v_mul_f32_e32 v14, v13, v15
@@ -8320,7 +8320,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cvt_f32_f16_e64 v5, |v0|
; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v6, v5
; GFX9-NEXT: s_cbranch_vccz .LBB10_2
-; GFX9-NEXT: ; %bb.1: ; %frem.else
+; GFX9-NEXT: ; %bb.1: ; %frem.else86
; GFX9-NEXT: s_movk_i32 s2, 0x7fff
; GFX9-NEXT: v_bfi_b32 v4, s2, 0, v2
; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, v6, v5
@@ -8329,7 +8329,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_branch .LBB10_8
; GFX9-NEXT: .LBB10_2:
; GFX9-NEXT: ; implicit-def: $vgpr4
-; GFX9-NEXT: .LBB10_3: ; %frem.compute
+; GFX9-NEXT: .LBB10_3: ; %frem.compute85
; GFX9-NEXT: v_frexp_exp_i32_f32_e32 v9, v6
; GFX9-NEXT: v_frexp_mant_f32_e32 v4, v6
; GFX9-NEXT: v_frexp_mant_f32_e32 v6, v5
@@ -8354,10 +8354,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 12, v6
; GFX9-NEXT: v_div_fixup_f32 v8, v8, v5, 1.0
; GFX9-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX9-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX9-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX9-NEXT: v_sub_u32_e32 v6, v9, v10
; GFX9-NEXT: v_add_u32_e32 v6, 11, v6
-; GFX9-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX9-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v9, v7
; GFX9-NEXT: v_mul_f32_e32 v7, v9, v8
@@ -8372,7 +8372,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_cbranch_vccnz .LBB10_5
; GFX9-NEXT: ; %bb.6: ; %Flow133
; GFX9-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX9-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX9-NEXT: v_add_u32_e32 v6, -10, v6
; GFX9-NEXT: v_ldexp_f32 v6, v7, v6
; GFX9-NEXT: v_mul_f32_e32 v7, v6, v8
@@ -8391,7 +8391,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cvt_f32_f16_sdwa v7, |v0| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v8, v7
; GFX9-NEXT: s_cbranch_vccz .LBB10_10
-; GFX9-NEXT: ; %bb.9: ; %frem.else20
+; GFX9-NEXT: ; %bb.9: ; %frem.else53
; GFX9-NEXT: s_movk_i32 s2, 0x7fff
; GFX9-NEXT: v_bfi_b32 v6, s2, 0, v5
; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, v8, v7
@@ -8400,7 +8400,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_branch .LBB10_16
; GFX9-NEXT: .LBB10_10:
; GFX9-NEXT: ; implicit-def: $vgpr6
-; GFX9-NEXT: .LBB10_11: ; %frem.compute19
+; GFX9-NEXT: .LBB10_11: ; %frem.compute52
; GFX9-NEXT: v_frexp_exp_i32_f32_e32 v11, v8
; GFX9-NEXT: v_frexp_mant_f32_e32 v6, v8
; GFX9-NEXT: v_frexp_mant_f32_e32 v8, v7
@@ -8425,10 +8425,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 12, v8
; GFX9-NEXT: v_div_fixup_f32 v10, v10, v7, 1.0
; GFX9-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX9-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX9-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX9-NEXT: v_sub_u32_e32 v8, v11, v12
; GFX9-NEXT: v_add_u32_e32 v8, 11, v8
-; GFX9-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX9-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v11, v9
; GFX9-NEXT: v_mul_f32_e32 v9, v11, v10
@@ -8443,7 +8443,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_cbranch_vccnz .LBB10_13
; GFX9-NEXT: ; %bb.14: ; %Flow129
; GFX9-NEXT: v_mov_b32_e32 v9, v11
-; GFX9-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX9-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX9-NEXT: v_add_u32_e32 v8, -10, v8
; GFX9-NEXT: v_ldexp_f32 v8, v9, v8
; GFX9-NEXT: v_mul_f32_e32 v9, v8, v10
@@ -8461,7 +8461,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cvt_f32_f16_e64 v8, |v1|
; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v9, v8
; GFX9-NEXT: s_cbranch_vccz .LBB10_18
-; GFX9-NEXT: ; %bb.17: ; %frem.else53
+; GFX9-NEXT: ; %bb.17: ; %frem.else20
; GFX9-NEXT: s_movk_i32 s2, 0x7fff
; GFX9-NEXT: v_bfi_b32 v7, s2, 0, v3
; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, v9, v8
@@ -8470,7 +8470,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_branch .LBB10_24
; GFX9-NEXT: .LBB10_18:
; GFX9-NEXT: ; implicit-def: $vgpr7
-; GFX9-NEXT: .LBB10_19: ; %frem.compute52
+; GFX9-NEXT: .LBB10_19: ; %frem.compute19
; GFX9-NEXT: v_frexp_exp_i32_f32_e32 v12, v9
; GFX9-NEXT: v_frexp_mant_f32_e32 v7, v9
; GFX9-NEXT: v_frexp_mant_f32_e32 v9, v8
@@ -8495,10 +8495,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 12, v9
; GFX9-NEXT: v_div_fixup_f32 v11, v11, v8, 1.0
; GFX9-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX9-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX9-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX9-NEXT: v_sub_u32_e32 v9, v12, v13
; GFX9-NEXT: v_add_u32_e32 v9, 11, v9
-; GFX9-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX9-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v12, v10
; GFX9-NEXT: v_mul_f32_e32 v10, v12, v11
@@ -8513,7 +8513,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_cbranch_vccnz .LBB10_21
; GFX9-NEXT: ; %bb.22: ; %Flow125
; GFX9-NEXT: v_mov_b32_e32 v10, v12
-; GFX9-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX9-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX9-NEXT: v_add_u32_e32 v9, -10, v9
; GFX9-NEXT: v_ldexp_f32 v9, v10, v9
; GFX9-NEXT: v_mul_f32_e32 v10, v9, v11
@@ -8532,7 +8532,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cvt_f32_f16_sdwa v10, |v1| dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: v_cmp_ngt_f32_e32 vcc, v11, v10
; GFX9-NEXT: s_cbranch_vccz .LBB10_26
-; GFX9-NEXT: ; %bb.25: ; %frem.else86
+; GFX9-NEXT: ; %bb.25: ; %frem.else
; GFX9-NEXT: s_movk_i32 s2, 0x7fff
; GFX9-NEXT: v_bfi_b32 v9, s2, 0, v8
; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, v11, v10
@@ -8541,7 +8541,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_branch .LBB10_32
; GFX9-NEXT: .LBB10_26:
; GFX9-NEXT: ; implicit-def: $vgpr9
-; GFX9-NEXT: .LBB10_27: ; %frem.compute85
+; GFX9-NEXT: .LBB10_27: ; %frem.compute
; GFX9-NEXT: v_frexp_exp_i32_f32_e32 v14, v11
; GFX9-NEXT: v_frexp_mant_f32_e32 v9, v11
; GFX9-NEXT: v_frexp_mant_f32_e32 v11, v10
@@ -8566,10 +8566,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 12, v11
; GFX9-NEXT: v_div_fixup_f32 v13, v13, v10, 1.0
; GFX9-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX9-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX9-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX9-NEXT: v_sub_u32_e32 v11, v14, v15
; GFX9-NEXT: v_add_u32_e32 v11, 11, v11
-; GFX9-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX9-NEXT: .LBB10_29: ; %frem.loop_body
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v14, v12
; GFX9-NEXT: v_mul_f32_e32 v12, v14, v13
@@ -8584,7 +8584,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_cbranch_vccnz .LBB10_29
; GFX9-NEXT: ; %bb.30: ; %Flow
; GFX9-NEXT: v_mov_b32_e32 v12, v14
-; GFX9-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX9-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX9-NEXT: v_add_u32_e32 v11, -10, v11
; GFX9-NEXT: v_ldexp_f32 v11, v12, v11
; GFX9-NEXT: v_mul_f32_e32 v12, v11, v13
@@ -8640,7 +8640,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cvt_f32_f16_e64 v5, |v0|
; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v6, v5
; GFX10-NEXT: s_cbranch_vccz .LBB10_2
-; GFX10-NEXT: ; %bb.1: ; %frem.else
+; GFX10-NEXT: ; %bb.1: ; %frem.else86
; GFX10-NEXT: v_bfi_b32 v4, 0x7fff, 0, v2
; GFX10-NEXT: v_cmp_eq_f32_e32 vcc_lo, v6, v5
; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v4, vcc_lo
@@ -8648,7 +8648,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: s_branch .LBB10_8
; GFX10-NEXT: .LBB10_2:
; GFX10-NEXT: ; implicit-def: $vgpr4
-; GFX10-NEXT: .LBB10_3: ; %frem.compute
+; GFX10-NEXT: .LBB10_3: ; %frem.compute85
; GFX10-NEXT: v_frexp_mant_f32_e32 v4, v6
; GFX10-NEXT: v_frexp_mant_f32_e32 v8, v5
; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v7, v6
@@ -8675,10 +8675,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v8
; GFX10-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0
; GFX10-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX10-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX10-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX10-NEXT: s_sub_i32 s2, s2, s3
; GFX10-NEXT: s_add_i32 s2, s2, 11
-; GFX10-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX10-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_mov_b32_e32 v9, v6
; GFX10-NEXT: s_add_i32 s2, s2, -11
@@ -8694,7 +8694,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: ; %bb.6: ; %Flow133
; GFX10-NEXT: v_mov_b32_e32 v8, s2
; GFX10-NEXT: v_mov_b32_e32 v6, v9
-; GFX10-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX10-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX10-NEXT: v_add_nc_u32_e32 v8, -10, v8
; GFX10-NEXT: v_ldexp_f32 v6, v6, v8
; GFX10-NEXT: v_mul_f32_e32 v7, v6, v7
@@ -8712,7 +8712,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cvt_f32_f16_e64 v8, |v5|
; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v8, v7
; GFX10-NEXT: s_cbranch_vccz .LBB10_10
-; GFX10-NEXT: ; %bb.9: ; %frem.else20
+; GFX10-NEXT: ; %bb.9: ; %frem.else53
; GFX10-NEXT: v_bfi_b32 v6, 0x7fff, 0, v5
; GFX10-NEXT: v_cmp_eq_f32_e32 vcc_lo, v8, v7
; GFX10-NEXT: v_cndmask_b32_e32 v6, v5, v6, vcc_lo
@@ -8720,7 +8720,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: s_branch .LBB10_16
; GFX10-NEXT: .LBB10_10:
; GFX10-NEXT: ; implicit-def: $vgpr6
-; GFX10-NEXT: .LBB10_11: ; %frem.compute19
+; GFX10-NEXT: .LBB10_11: ; %frem.compute52
; GFX10-NEXT: v_frexp_mant_f32_e32 v6, v8
; GFX10-NEXT: v_frexp_mant_f32_e32 v10, v7
; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v9, v8
@@ -8747,10 +8747,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v10
; GFX10-NEXT: v_div_fixup_f32 v9, v9, v7, 1.0
; GFX10-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX10-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX10-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX10-NEXT: s_sub_i32 s2, s2, s3
; GFX10-NEXT: s_add_i32 s2, s2, 11
-; GFX10-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX10-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_mov_b32_e32 v11, v8
; GFX10-NEXT: s_add_i32 s2, s2, -11
@@ -8766,7 +8766,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: ; %bb.14: ; %Flow129
; GFX10-NEXT: v_mov_b32_e32 v10, s2
; GFX10-NEXT: v_mov_b32_e32 v8, v11
-; GFX10-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX10-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX10-NEXT: v_add_nc_u32_e32 v10, -10, v10
; GFX10-NEXT: v_ldexp_f32 v8, v8, v10
; GFX10-NEXT: v_mul_f32_e32 v9, v8, v9
@@ -8783,7 +8783,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cvt_f32_f16_e64 v8, |v1|
; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v9, v8
; GFX10-NEXT: s_cbranch_vccz .LBB10_18
-; GFX10-NEXT: ; %bb.17: ; %frem.else53
+; GFX10-NEXT: ; %bb.17: ; %frem.else20
; GFX10-NEXT: v_bfi_b32 v7, 0x7fff, 0, v3
; GFX10-NEXT: v_cmp_eq_f32_e32 vcc_lo, v9, v8
; GFX10-NEXT: v_cndmask_b32_e32 v7, v3, v7, vcc_lo
@@ -8791,7 +8791,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: s_branch .LBB10_24
; GFX10-NEXT: .LBB10_18:
; GFX10-NEXT: ; implicit-def: $vgpr7
-; GFX10-NEXT: .LBB10_19: ; %frem.compute52
+; GFX10-NEXT: .LBB10_19: ; %frem.compute19
; GFX10-NEXT: v_frexp_mant_f32_e32 v7, v9
; GFX10-NEXT: v_frexp_mant_f32_e32 v11, v8
; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v10, v9
@@ -8818,10 +8818,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v11
; GFX10-NEXT: v_div_fixup_f32 v10, v10, v8, 1.0
; GFX10-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX10-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX10-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX10-NEXT: s_sub_i32 s2, s2, s3
; GFX10-NEXT: s_add_i32 s2, s2, 11
-; GFX10-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX10-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_mov_b32_e32 v12, v9
; GFX10-NEXT: s_add_i32 s2, s2, -11
@@ -8837,7 +8837,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: ; %bb.22: ; %Flow125
; GFX10-NEXT: v_mov_b32_e32 v11, s2
; GFX10-NEXT: v_mov_b32_e32 v9, v12
-; GFX10-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX10-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX10-NEXT: v_add_nc_u32_e32 v11, -10, v11
; GFX10-NEXT: v_ldexp_f32 v9, v9, v11
; GFX10-NEXT: v_mul_f32_e32 v10, v9, v10
@@ -8855,7 +8855,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cvt_f32_f16_e64 v11, |v8|
; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v11, v10
; GFX10-NEXT: s_cbranch_vccz .LBB10_26
-; GFX10-NEXT: ; %bb.25: ; %frem.else86
+; GFX10-NEXT: ; %bb.25: ; %frem.else
; GFX10-NEXT: v_bfi_b32 v9, 0x7fff, 0, v8
; GFX10-NEXT: v_cmp_eq_f32_e32 vcc_lo, v11, v10
; GFX10-NEXT: v_cndmask_b32_e32 v9, v8, v9, vcc_lo
@@ -8863,7 +8863,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: s_branch .LBB10_32
; GFX10-NEXT: .LBB10_26:
; GFX10-NEXT: ; implicit-def: $vgpr9
-; GFX10-NEXT: .LBB10_27: ; %frem.compute85
+; GFX10-NEXT: .LBB10_27: ; %frem.compute
; GFX10-NEXT: v_frexp_mant_f32_e32 v9, v11
; GFX10-NEXT: v_frexp_mant_f32_e32 v13, v10
; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v12, v11
@@ -8890,10 +8890,10 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v13
; GFX10-NEXT: v_div_fixup_f32 v12, v12, v10, 1.0
; GFX10-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX10-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX10-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX10-NEXT: s_sub_i32 s2, s2, s3
; GFX10-NEXT: s_add_i32 s2, s2, 11
-; GFX10-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX10-NEXT: .LBB10_29: ; %frem.loop_body
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_mov_b32_e32 v14, v11
; GFX10-NEXT: s_add_i32 s2, s2, -11
@@ -8909,7 +8909,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: ; %bb.30: ; %Flow
; GFX10-NEXT: v_mov_b32_e32 v13, s2
; GFX10-NEXT: v_mov_b32_e32 v11, v14
-; GFX10-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX10-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX10-NEXT: v_add_nc_u32_e32 v13, -10, v13
; GFX10-NEXT: v_ldexp_f32 v11, v11, v13
; GFX10-NEXT: v_mul_f32_e32 v12, v11, v12
@@ -8963,7 +8963,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v6, v5
; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB10_2
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %frem.else
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %frem.else86
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v0.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, 0
; GFX11-TRUE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v6, v5
@@ -8974,7 +8974,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_branch .LBB10_8
; GFX11-TRUE16-NEXT: .LBB10_2:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
-; GFX11-TRUE16-NEXT: .LBB10_3: ; %frem.compute
+; GFX11-TRUE16-NEXT: .LBB10_3: ; %frem.compute85
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v4, v6
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v8, v5
; GFX11-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v7, v6
@@ -9010,11 +9010,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX11-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX11-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX11-TRUE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-TRUE16-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX11-TRUE16-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v9, v6
@@ -9034,7 +9034,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: ; %bb.6: ; %Flow133
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v8, s2
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v9
-; GFX11-TRUE16-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX11-TRUE16-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v8, -10, v8
; GFX11-TRUE16-NEXT: v_ldexp_f32 v6, v6, v8
@@ -9061,7 +9061,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v9, v8
; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB10_10
-; GFX11-TRUE16-NEXT: ; %bb.9: ; %frem.else20
+; GFX11-TRUE16-NEXT: ; %bb.9: ; %frem.else53
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, 0
; GFX11-TRUE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v9, v8
@@ -9072,7 +9072,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_branch .LBB10_16
; GFX11-TRUE16-NEXT: .LBB10_10:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr7
-; GFX11-TRUE16-NEXT: .LBB10_11: ; %frem.compute19
+; GFX11-TRUE16-NEXT: .LBB10_11: ; %frem.compute52
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v7, v9
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v11, v8
; GFX11-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v10, v9
@@ -9108,11 +9108,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_div_fixup_f32 v10, v10, v8, 1.0
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX11-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX11-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX11-TRUE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-TRUE16-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX11-TRUE16-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, v9
@@ -9132,7 +9132,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: ; %bb.14: ; %Flow129
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v11, s2
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v9, v12
-; GFX11-TRUE16-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX11-TRUE16-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v11, -10, v11
; GFX11-TRUE16-NEXT: v_ldexp_f32 v9, v9, v11
@@ -9156,7 +9156,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v10, v9
; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB10_18
-; GFX11-TRUE16-NEXT: ; %bb.17: ; %frem.else53
+; GFX11-TRUE16-NEXT: ; %bb.17: ; %frem.else20
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v1.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, 0
; GFX11-TRUE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v10, v9
@@ -9167,7 +9167,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_branch .LBB10_24
; GFX11-TRUE16-NEXT: .LBB10_18:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr8
-; GFX11-TRUE16-NEXT: .LBB10_19: ; %frem.compute52
+; GFX11-TRUE16-NEXT: .LBB10_19: ; %frem.compute19
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v8, v10
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v12, v9
; GFX11-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v11, v10
@@ -9203,11 +9203,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_div_fixup_f32 v11, v11, v9, 1.0
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX11-TRUE16-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX11-TRUE16-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX11-TRUE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-TRUE16-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX11-TRUE16-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v10
@@ -9227,7 +9227,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: ; %bb.22: ; %Flow125
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v12, s2
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v10, v13
-; GFX11-TRUE16-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX11-TRUE16-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v12, -10, v12
; GFX11-TRUE16-NEXT: v_ldexp_f32 v10, v10, v12
@@ -9254,7 +9254,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v13, v12
; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB10_26
-; GFX11-TRUE16-NEXT: ; %bb.25: ; %frem.else86
+; GFX11-TRUE16-NEXT: ; %bb.25: ; %frem.else
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v9.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, 0
; GFX11-TRUE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v13, v12
@@ -9265,7 +9265,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_branch .LBB10_32
; GFX11-TRUE16-NEXT: .LBB10_26:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr11
-; GFX11-TRUE16-NEXT: .LBB10_27: ; %frem.compute85
+; GFX11-TRUE16-NEXT: .LBB10_27: ; %frem.compute
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v11, v13
; GFX11-TRUE16-NEXT: v_frexp_mant_f32_e32 v15, v12
; GFX11-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v14, v13
@@ -9301,11 +9301,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_div_fixup_f32 v14, v14, v12, 1.0
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX11-TRUE16-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX11-TRUE16-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX11-TRUE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-TRUE16-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX11-TRUE16-NEXT: .LBB10_29: ; %frem.loop_body
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v16, v13
@@ -9325,7 +9325,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-TRUE16-NEXT: ; %bb.30: ; %Flow
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v15, s2
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v13, v16
-; GFX11-TRUE16-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX11-TRUE16-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v15, -10, v15
; GFX11-TRUE16-NEXT: v_ldexp_f32 v13, v13, v15
@@ -9388,7 +9388,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v6, v5
; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB10_2
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %frem.else
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %frem.else86
; GFX11-FAKE16-NEXT: v_bfi_b32 v4, 0x7fff, 0, v0
; GFX11-FAKE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v6, v5
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
@@ -9397,7 +9397,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_branch .LBB10_8
; GFX11-FAKE16-NEXT: .LBB10_2:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
-; GFX11-FAKE16-NEXT: .LBB10_3: ; %frem.compute
+; GFX11-FAKE16-NEXT: .LBB10_3: ; %frem.compute85
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v4, v6
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v8, v5
; GFX11-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v7, v6
@@ -9433,11 +9433,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX11-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX11-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX11-FAKE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-FAKE16-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX11-FAKE16-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v6
@@ -9457,7 +9457,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: ; %bb.6: ; %Flow133
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v8, s2
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v9
-; GFX11-FAKE16-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX11-FAKE16-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v8, -10, v8
; GFX11-FAKE16-NEXT: v_ldexp_f32 v6, v6, v8
@@ -9483,7 +9483,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v9, v8
; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB10_10
-; GFX11-FAKE16-NEXT: ; %bb.9: ; %frem.else20
+; GFX11-FAKE16-NEXT: ; %bb.9: ; %frem.else53
; GFX11-FAKE16-NEXT: v_bfi_b32 v7, 0x7fff, 0, v5
; GFX11-FAKE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v9, v8
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
@@ -9492,7 +9492,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_branch .LBB10_16
; GFX11-FAKE16-NEXT: .LBB10_10:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr7
-; GFX11-FAKE16-NEXT: .LBB10_11: ; %frem.compute19
+; GFX11-FAKE16-NEXT: .LBB10_11: ; %frem.compute52
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v7, v9
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v11, v8
; GFX11-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v10, v9
@@ -9528,11 +9528,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_div_fixup_f32 v10, v10, v8, 1.0
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX11-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX11-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX11-FAKE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-FAKE16-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX11-FAKE16-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, v9
@@ -9552,7 +9552,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: ; %bb.14: ; %Flow129
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v11, s2
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v9, v12
-; GFX11-FAKE16-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX11-FAKE16-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v11, -10, v11
; GFX11-FAKE16-NEXT: v_ldexp_f32 v9, v9, v11
@@ -9575,7 +9575,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v10, v9
; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB10_18
-; GFX11-FAKE16-NEXT: ; %bb.17: ; %frem.else53
+; GFX11-FAKE16-NEXT: ; %bb.17: ; %frem.else20
; GFX11-FAKE16-NEXT: v_bfi_b32 v8, 0x7fff, 0, v1
; GFX11-FAKE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v10, v9
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
@@ -9584,7 +9584,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_branch .LBB10_24
; GFX11-FAKE16-NEXT: .LBB10_18:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr8
-; GFX11-FAKE16-NEXT: .LBB10_19: ; %frem.compute52
+; GFX11-FAKE16-NEXT: .LBB10_19: ; %frem.compute19
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v8, v10
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v12, v9
; GFX11-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v11, v10
@@ -9620,11 +9620,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_div_fixup_f32 v11, v11, v9, 1.0
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX11-FAKE16-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX11-FAKE16-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX11-FAKE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-FAKE16-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX11-FAKE16-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v13, v10
@@ -9644,7 +9644,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: ; %bb.22: ; %Flow125
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v12, s2
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v10, v13
-; GFX11-FAKE16-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX11-FAKE16-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v12, -10, v12
; GFX11-FAKE16-NEXT: v_ldexp_f32 v10, v10, v12
@@ -9670,7 +9670,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v13, v12
; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB10_26
-; GFX11-FAKE16-NEXT: ; %bb.25: ; %frem.else86
+; GFX11-FAKE16-NEXT: ; %bb.25: ; %frem.else
; GFX11-FAKE16-NEXT: v_bfi_b32 v11, 0x7fff, 0, v9
; GFX11-FAKE16-NEXT: v_cmp_eq_f32_e32 vcc_lo, v13, v12
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
@@ -9679,7 +9679,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_branch .LBB10_32
; GFX11-FAKE16-NEXT: .LBB10_26:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr11
-; GFX11-FAKE16-NEXT: .LBB10_27: ; %frem.compute85
+; GFX11-FAKE16-NEXT: .LBB10_27: ; %frem.compute
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v11, v13
; GFX11-FAKE16-NEXT: v_frexp_mant_f32_e32 v15, v12
; GFX11-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v14, v13
@@ -9715,11 +9715,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_div_fixup_f32 v14, v14, v12, 1.0
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX11-FAKE16-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX11-FAKE16-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX11-FAKE16-NEXT: s_sub_i32 s2, s2, s3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 11
-; GFX11-FAKE16-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX11-FAKE16-NEXT: .LBB10_29: ; %frem.loop_body
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v16, v13
@@ -9739,7 +9739,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-FAKE16-NEXT: ; %bb.30: ; %Flow
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v15, s2
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v13, v16
-; GFX11-FAKE16-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX11-FAKE16-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v15, -10, v15
; GFX11-FAKE16-NEXT: v_ldexp_f32 v13, v13, v15
@@ -9804,7 +9804,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-TRUE16-NEXT: s_cmp_ngt_f32 s8, s6
; GFX1150-TRUE16-NEXT: s_cbranch_scc0 .LBB10_2
-; GFX1150-TRUE16-NEXT: ; %bb.1: ; %frem.else
+; GFX1150-TRUE16-NEXT: ; %bb.1: ; %frem.else86
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v0.l, s5
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v1.l, 0
; GFX1150-TRUE16-NEXT: s_cmp_eq_f32 s8, s6
@@ -9816,7 +9816,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_branch .LBB10_8
; GFX1150-TRUE16-NEXT: .LBB10_2:
; GFX1150-TRUE16-NEXT: ; implicit-def: $vgpr0
-; GFX1150-TRUE16-NEXT: .LBB10_3: ; %frem.compute
+; GFX1150-TRUE16-NEXT: .LBB10_3: ; %frem.compute85
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s6
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v0, s8
; GFX1150-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s8
@@ -9851,11 +9851,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4
; GFX1150-TRUE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0
; GFX1150-TRUE16-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX1150-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX1150-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX1150-TRUE16-NEXT: s_sub_i32 s6, s8, s6
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_add_i32 s6, s6, 11
-; GFX1150-TRUE16-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX1150-TRUE16-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX1150-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v5, v2
@@ -9877,7 +9877,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: ; %bb.6: ; %Flow133
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v4, s6
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v2, v5
-; GFX1150-TRUE16-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX1150-TRUE16-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-TRUE16-NEXT: v_add_nc_u32_e32 v4, -10, v4
; GFX1150-TRUE16-NEXT: v_ldexp_f32 v2, v2, v4
@@ -9907,7 +9907,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-TRUE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1150-TRUE16-NEXT: s_cbranch_scc0 .LBB10_10
-; GFX1150-TRUE16-NEXT: ; %bb.9: ; %frem.else20
+; GFX1150-TRUE16-NEXT: ; %bb.9: ; %frem.else53
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v1.l, s8
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
; GFX1150-TRUE16-NEXT: s_cmp_eq_f32 s10, s9
@@ -9919,7 +9919,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_branch .LBB10_16
; GFX1150-TRUE16-NEXT: .LBB10_10:
; GFX1150-TRUE16-NEXT: ; implicit-def: $vgpr1
-; GFX1150-TRUE16-NEXT: .LBB10_11: ; %frem.compute19
+; GFX1150-TRUE16-NEXT: .LBB10_11: ; %frem.compute52
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v2, s9
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s10
; GFX1150-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s10
@@ -9954,11 +9954,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5
; GFX1150-TRUE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0
; GFX1150-TRUE16-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX1150-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX1150-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX1150-TRUE16-NEXT: s_sub_i32 s9, s10, s9
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_add_i32 s9, s9, 11
-; GFX1150-TRUE16-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX1150-TRUE16-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX1150-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v6, v3
@@ -9980,7 +9980,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: ; %bb.14: ; %Flow129
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v5, s9
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v3, v6
-; GFX1150-TRUE16-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX1150-TRUE16-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-TRUE16-NEXT: v_add_nc_u32_e32 v5, -10, v5
; GFX1150-TRUE16-NEXT: v_ldexp_f32 v3, v3, v5
@@ -10008,7 +10008,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-TRUE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1150-TRUE16-NEXT: s_cbranch_scc0 .LBB10_18
-; GFX1150-TRUE16-NEXT: ; %bb.17: ; %frem.else53
+; GFX1150-TRUE16-NEXT: ; %bb.17: ; %frem.else20
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v2.l, s7
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v3.l, 0
; GFX1150-TRUE16-NEXT: s_cmp_eq_f32 s10, s9
@@ -10020,7 +10020,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_branch .LBB10_24
; GFX1150-TRUE16-NEXT: .LBB10_18:
; GFX1150-TRUE16-NEXT: ; implicit-def: $vgpr2
-; GFX1150-TRUE16-NEXT: .LBB10_19: ; %frem.compute52
+; GFX1150-TRUE16-NEXT: .LBB10_19: ; %frem.compute19
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v3, s9
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v2, s10
; GFX1150-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v5, s10
@@ -10055,11 +10055,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v6
; GFX1150-TRUE16-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0
; GFX1150-TRUE16-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX1150-TRUE16-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX1150-TRUE16-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX1150-TRUE16-NEXT: s_sub_i32 s9, s10, s9
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_add_i32 s9, s9, 11
-; GFX1150-TRUE16-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX1150-TRUE16-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX1150-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v7, v4
@@ -10081,7 +10081,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: ; %bb.22: ; %Flow125
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v6, s9
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v4, v7
-; GFX1150-TRUE16-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX1150-TRUE16-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-TRUE16-NEXT: v_add_nc_u32_e32 v6, -10, v6
; GFX1150-TRUE16-NEXT: v_ldexp_f32 v4, v4, v6
@@ -10111,7 +10111,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-TRUE16-NEXT: s_cmp_ngt_f32 s12, s11
; GFX1150-TRUE16-NEXT: s_cbranch_scc0 .LBB10_26
-; GFX1150-TRUE16-NEXT: ; %bb.25: ; %frem.else86
+; GFX1150-TRUE16-NEXT: ; %bb.25: ; %frem.else
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v3.l, s10
; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v4.l, 0
; GFX1150-TRUE16-NEXT: s_cmp_eq_f32 s12, s11
@@ -10123,7 +10123,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: s_branch .LBB10_32
; GFX1150-TRUE16-NEXT: .LBB10_26:
; GFX1150-TRUE16-NEXT: ; implicit-def: $vgpr3
-; GFX1150-TRUE16-NEXT: .LBB10_27: ; %frem.compute85
+; GFX1150-TRUE16-NEXT: .LBB10_27: ; %frem.compute
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v4, s11
; GFX1150-TRUE16-NEXT: v_frexp_mant_f32_e32 v3, s12
; GFX1150-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v6, s12
@@ -10158,11 +10158,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v7
; GFX1150-TRUE16-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0
; GFX1150-TRUE16-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX1150-TRUE16-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX1150-TRUE16-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX1150-TRUE16-NEXT: s_sub_i32 s11, s12, s11
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: s_add_i32 s11, s11, 11
-; GFX1150-TRUE16-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX1150-TRUE16-NEXT: .LBB10_29: ; %frem.loop_body
; GFX1150-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v8, v5
@@ -10184,7 +10184,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-TRUE16-NEXT: ; %bb.30: ; %Flow
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v7, s11
; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v5, v8
-; GFX1150-TRUE16-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX1150-TRUE16-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-TRUE16-NEXT: v_add_nc_u32_e32 v7, -10, v7
; GFX1150-TRUE16-NEXT: v_ldexp_f32 v5, v5, v7
@@ -10265,7 +10265,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-FAKE16-NEXT: s_cmp_ngt_f32 s8, s6
; GFX1150-FAKE16-NEXT: s_cbranch_scc0 .LBB10_2
-; GFX1150-FAKE16-NEXT: ; %bb.1: ; %frem.else
+; GFX1150-FAKE16-NEXT: ; %bb.1: ; %frem.else86
; GFX1150-FAKE16-NEXT: s_cmp_eq_f32 s8, s6
; GFX1150-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, 0, s5
; GFX1150-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -10275,7 +10275,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_branch .LBB10_8
; GFX1150-FAKE16-NEXT: .LBB10_2:
; GFX1150-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1150-FAKE16-NEXT: .LBB10_3: ; %frem.compute
+; GFX1150-FAKE16-NEXT: .LBB10_3: ; %frem.compute85
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s6
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v0, s8
; GFX1150-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s8
@@ -10310,11 +10310,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4
; GFX1150-FAKE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0
; GFX1150-FAKE16-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX1150-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX1150-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX1150-FAKE16-NEXT: s_sub_i32 s6, s8, s6
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: s_add_i32 s6, s6, 11
-; GFX1150-FAKE16-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX1150-FAKE16-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX1150-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v5, v2
@@ -10336,7 +10336,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: ; %bb.6: ; %Flow133
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v4, s6
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v2, v5
-; GFX1150-FAKE16-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX1150-FAKE16-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-FAKE16-NEXT: v_add_nc_u32_e32 v4, -10, v4
; GFX1150-FAKE16-NEXT: v_ldexp_f32 v2, v2, v4
@@ -10365,7 +10365,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-FAKE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1150-FAKE16-NEXT: s_cbranch_scc0 .LBB10_10
-; GFX1150-FAKE16-NEXT: ; %bb.9: ; %frem.else20
+; GFX1150-FAKE16-NEXT: ; %bb.9: ; %frem.else53
; GFX1150-FAKE16-NEXT: s_cmp_eq_f32 s10, s9
; GFX1150-FAKE16-NEXT: v_bfi_b32 v1, 0x7fff, 0, s8
; GFX1150-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -10375,7 +10375,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_branch .LBB10_16
; GFX1150-FAKE16-NEXT: .LBB10_10:
; GFX1150-FAKE16-NEXT: ; implicit-def: $vgpr1
-; GFX1150-FAKE16-NEXT: .LBB10_11: ; %frem.compute19
+; GFX1150-FAKE16-NEXT: .LBB10_11: ; %frem.compute52
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v2, s9
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s10
; GFX1150-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s10
@@ -10410,11 +10410,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5
; GFX1150-FAKE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0
; GFX1150-FAKE16-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX1150-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX1150-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX1150-FAKE16-NEXT: s_sub_i32 s9, s10, s9
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: s_add_i32 s9, s9, 11
-; GFX1150-FAKE16-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX1150-FAKE16-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX1150-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v6, v3
@@ -10436,7 +10436,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: ; %bb.14: ; %Flow129
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v5, s9
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v3, v6
-; GFX1150-FAKE16-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX1150-FAKE16-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-FAKE16-NEXT: v_add_nc_u32_e32 v5, -10, v5
; GFX1150-FAKE16-NEXT: v_ldexp_f32 v3, v3, v5
@@ -10463,7 +10463,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-FAKE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1150-FAKE16-NEXT: s_cbranch_scc0 .LBB10_18
-; GFX1150-FAKE16-NEXT: ; %bb.17: ; %frem.else53
+; GFX1150-FAKE16-NEXT: ; %bb.17: ; %frem.else20
; GFX1150-FAKE16-NEXT: s_cmp_eq_f32 s10, s9
; GFX1150-FAKE16-NEXT: v_bfi_b32 v2, 0x7fff, 0, s7
; GFX1150-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -10473,7 +10473,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_branch .LBB10_24
; GFX1150-FAKE16-NEXT: .LBB10_18:
; GFX1150-FAKE16-NEXT: ; implicit-def: $vgpr2
-; GFX1150-FAKE16-NEXT: .LBB10_19: ; %frem.compute52
+; GFX1150-FAKE16-NEXT: .LBB10_19: ; %frem.compute19
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v3, s9
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v2, s10
; GFX1150-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v5, s10
@@ -10508,11 +10508,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v6
; GFX1150-FAKE16-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0
; GFX1150-FAKE16-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX1150-FAKE16-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX1150-FAKE16-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX1150-FAKE16-NEXT: s_sub_i32 s9, s10, s9
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: s_add_i32 s9, s9, 11
-; GFX1150-FAKE16-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX1150-FAKE16-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX1150-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v7, v4
@@ -10534,7 +10534,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: ; %bb.22: ; %Flow125
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v6, s9
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v4, v7
-; GFX1150-FAKE16-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX1150-FAKE16-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-FAKE16-NEXT: v_add_nc_u32_e32 v6, -10, v6
; GFX1150-FAKE16-NEXT: v_ldexp_f32 v4, v4, v6
@@ -10563,7 +10563,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1150-FAKE16-NEXT: s_cmp_ngt_f32 s12, s11
; GFX1150-FAKE16-NEXT: s_cbranch_scc0 .LBB10_26
-; GFX1150-FAKE16-NEXT: ; %bb.25: ; %frem.else86
+; GFX1150-FAKE16-NEXT: ; %bb.25: ; %frem.else
; GFX1150-FAKE16-NEXT: s_cmp_eq_f32 s12, s11
; GFX1150-FAKE16-NEXT: v_bfi_b32 v3, 0x7fff, 0, s10
; GFX1150-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -10573,7 +10573,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: s_branch .LBB10_32
; GFX1150-FAKE16-NEXT: .LBB10_26:
; GFX1150-FAKE16-NEXT: ; implicit-def: $vgpr3
-; GFX1150-FAKE16-NEXT: .LBB10_27: ; %frem.compute85
+; GFX1150-FAKE16-NEXT: .LBB10_27: ; %frem.compute
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v4, s11
; GFX1150-FAKE16-NEXT: v_frexp_mant_f32_e32 v3, s12
; GFX1150-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v6, s12
@@ -10608,11 +10608,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v7
; GFX1150-FAKE16-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0
; GFX1150-FAKE16-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX1150-FAKE16-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX1150-FAKE16-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX1150-FAKE16-NEXT: s_sub_i32 s11, s12, s11
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: s_add_i32 s11, s11, 11
-; GFX1150-FAKE16-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX1150-FAKE16-NEXT: .LBB10_29: ; %frem.loop_body
; GFX1150-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v8, v5
@@ -10634,7 +10634,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-FAKE16-NEXT: ; %bb.30: ; %Flow
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v7, s11
; GFX1150-FAKE16-NEXT: v_mov_b32_e32 v5, v8
-; GFX1150-FAKE16-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX1150-FAKE16-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX1150-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-FAKE16-NEXT: v_add_nc_u32_e32 v7, -10, v7
; GFX1150-FAKE16-NEXT: v_ldexp_f32 v5, v5, v7
@@ -10712,7 +10712,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1200-TRUE16-NEXT: s_cmp_ngt_f32 s8, s6
; GFX1200-TRUE16-NEXT: s_cbranch_scc0 .LBB10_2
-; GFX1200-TRUE16-NEXT: ; %bb.1: ; %frem.else
+; GFX1200-TRUE16-NEXT: ; %bb.1: ; %frem.else86
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v0.l, s5
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v1.l, 0
; GFX1200-TRUE16-NEXT: s_cmp_eq_f32 s8, s6
@@ -10724,7 +10724,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_branch .LBB10_8
; GFX1200-TRUE16-NEXT: .LBB10_2:
; GFX1200-TRUE16-NEXT: ; implicit-def: $vgpr0
-; GFX1200-TRUE16-NEXT: .LBB10_3: ; %frem.compute
+; GFX1200-TRUE16-NEXT: .LBB10_3: ; %frem.compute85
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s6
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v0, s8
; GFX1200-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s8
@@ -10759,11 +10759,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4
; GFX1200-TRUE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0
; GFX1200-TRUE16-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX1200-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX1200-TRUE16-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX1200-TRUE16-NEXT: s_sub_co_i32 s6, s8, s6
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_add_co_i32 s6, s6, 11
-; GFX1200-TRUE16-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX1200-TRUE16-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX1200-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v5, v2
@@ -10787,7 +10787,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: ; %bb.6: ; %Flow133
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v4, s6
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v2, v5
-; GFX1200-TRUE16-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX1200-TRUE16-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_add_nc_u32_e32 v4, -10, v4
; GFX1200-TRUE16-NEXT: v_ldexp_f32 v2, v2, v4
@@ -10821,7 +10821,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
; GFX1200-TRUE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1200-TRUE16-NEXT: s_cbranch_scc0 .LBB10_10
-; GFX1200-TRUE16-NEXT: ; %bb.9: ; %frem.else20
+; GFX1200-TRUE16-NEXT: ; %bb.9: ; %frem.else53
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v1.l, s8
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0
; GFX1200-TRUE16-NEXT: s_cmp_eq_f32 s10, s9
@@ -10833,7 +10833,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_branch .LBB10_16
; GFX1200-TRUE16-NEXT: .LBB10_10:
; GFX1200-TRUE16-NEXT: ; implicit-def: $vgpr1
-; GFX1200-TRUE16-NEXT: .LBB10_11: ; %frem.compute19
+; GFX1200-TRUE16-NEXT: .LBB10_11: ; %frem.compute52
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v2, s9
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v1, s10
; GFX1200-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s10
@@ -10869,11 +10869,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5
; GFX1200-TRUE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0
; GFX1200-TRUE16-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX1200-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX1200-TRUE16-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX1200-TRUE16-NEXT: s_sub_co_i32 s9, s10, s9
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_add_co_i32 s9, s9, 11
-; GFX1200-TRUE16-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX1200-TRUE16-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX1200-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v6, v3
@@ -10897,7 +10897,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: ; %bb.14: ; %Flow129
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v5, s9
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v3, v6
-; GFX1200-TRUE16-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX1200-TRUE16-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_add_nc_u32_e32 v5, -10, v5
; GFX1200-TRUE16-NEXT: v_ldexp_f32 v3, v3, v5
@@ -10928,7 +10928,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
; GFX1200-TRUE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1200-TRUE16-NEXT: s_cbranch_scc0 .LBB10_18
-; GFX1200-TRUE16-NEXT: ; %bb.17: ; %frem.else53
+; GFX1200-TRUE16-NEXT: ; %bb.17: ; %frem.else20
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v2.l, s7
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v3.l, 0
; GFX1200-TRUE16-NEXT: s_cmp_eq_f32 s10, s9
@@ -10941,7 +10941,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_branch .LBB10_24
; GFX1200-TRUE16-NEXT: .LBB10_18:
; GFX1200-TRUE16-NEXT: ; implicit-def: $vgpr2
-; GFX1200-TRUE16-NEXT: .LBB10_19: ; %frem.compute52
+; GFX1200-TRUE16-NEXT: .LBB10_19: ; %frem.compute19
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v3, s9
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v2, s10
; GFX1200-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v5, s10
@@ -10977,11 +10977,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v6
; GFX1200-TRUE16-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0
; GFX1200-TRUE16-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX1200-TRUE16-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX1200-TRUE16-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX1200-TRUE16-NEXT: s_sub_co_i32 s9, s10, s9
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_add_co_i32 s9, s9, 11
-; GFX1200-TRUE16-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX1200-TRUE16-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX1200-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v7, v4
@@ -11005,7 +11005,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: ; %bb.22: ; %Flow125
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v6, s9
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v4, v7
-; GFX1200-TRUE16-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX1200-TRUE16-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_add_nc_u32_e32 v6, -10, v6
; GFX1200-TRUE16-NEXT: v_ldexp_f32 v4, v4, v6
@@ -11039,7 +11039,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
; GFX1200-TRUE16-NEXT: s_cmp_ngt_f32 s12, s11
; GFX1200-TRUE16-NEXT: s_cbranch_scc0 .LBB10_26
-; GFX1200-TRUE16-NEXT: ; %bb.25: ; %frem.else86
+; GFX1200-TRUE16-NEXT: ; %bb.25: ; %frem.else
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v3.l, s10
; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v4.l, 0
; GFX1200-TRUE16-NEXT: s_cmp_eq_f32 s12, s11
@@ -11051,7 +11051,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: s_branch .LBB10_32
; GFX1200-TRUE16-NEXT: .LBB10_26:
; GFX1200-TRUE16-NEXT: ; implicit-def: $vgpr3
-; GFX1200-TRUE16-NEXT: .LBB10_27: ; %frem.compute85
+; GFX1200-TRUE16-NEXT: .LBB10_27: ; %frem.compute
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v4, s11
; GFX1200-TRUE16-NEXT: v_frexp_mant_f32_e32 v3, s12
; GFX1200-TRUE16-NEXT: v_frexp_exp_i32_f32_e32 v6, s12
@@ -11087,11 +11087,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v7
; GFX1200-TRUE16-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0
; GFX1200-TRUE16-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX1200-TRUE16-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX1200-TRUE16-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX1200-TRUE16-NEXT: s_sub_co_i32 s11, s12, s11
; GFX1200-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX1200-TRUE16-NEXT: s_add_co_i32 s11, s11, 11
-; GFX1200-TRUE16-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX1200-TRUE16-NEXT: .LBB10_29: ; %frem.loop_body
; GFX1200-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v8, v5
@@ -11115,7 +11115,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-TRUE16-NEXT: ; %bb.30: ; %Flow
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v7, s11
; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v5, v8
-; GFX1200-TRUE16-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX1200-TRUE16-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-TRUE16-NEXT: v_add_nc_u32_e32 v7, -10, v7
; GFX1200-TRUE16-NEXT: v_ldexp_f32 v5, v5, v7
@@ -11203,7 +11203,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
; GFX1200-FAKE16-NEXT: s_cmp_ngt_f32 s8, s6
; GFX1200-FAKE16-NEXT: s_cbranch_scc0 .LBB10_2
-; GFX1200-FAKE16-NEXT: ; %bb.1: ; %frem.else
+; GFX1200-FAKE16-NEXT: ; %bb.1: ; %frem.else86
; GFX1200-FAKE16-NEXT: s_cmp_eq_f32 s8, s6
; GFX1200-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, 0, s5
; GFX1200-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -11213,7 +11213,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_branch .LBB10_8
; GFX1200-FAKE16-NEXT: .LBB10_2:
; GFX1200-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1200-FAKE16-NEXT: .LBB10_3: ; %frem.compute
+; GFX1200-FAKE16-NEXT: .LBB10_3: ; %frem.compute85
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s6
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v0, s8
; GFX1200-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v3, s8
@@ -11249,11 +11249,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v4
; GFX1200-FAKE16-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0
; GFX1200-FAKE16-NEXT: s_cbranch_vccnz .LBB10_7
-; GFX1200-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX1200-FAKE16-NEXT: ; %bb.4: ; %frem.loop_body93.preheader
; GFX1200-FAKE16-NEXT: s_sub_co_i32 s6, s8, s6
; GFX1200-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX1200-FAKE16-NEXT: s_add_co_i32 s6, s6, 11
-; GFX1200-FAKE16-NEXT: .LBB10_5: ; %frem.loop_body
+; GFX1200-FAKE16-NEXT: .LBB10_5: ; %frem.loop_body93
; GFX1200-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v5, v2
@@ -11277,7 +11277,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: ; %bb.6: ; %Flow133
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v4, s6
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v2, v5
-; GFX1200-FAKE16-NEXT: .LBB10_7: ; %frem.loop_exit
+; GFX1200-FAKE16-NEXT: .LBB10_7: ; %frem.loop_exit94
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_add_nc_u32_e32 v4, -10, v4
; GFX1200-FAKE16-NEXT: v_ldexp_f32 v2, v2, v4
@@ -11310,7 +11310,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
; GFX1200-FAKE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1200-FAKE16-NEXT: s_cbranch_scc0 .LBB10_10
-; GFX1200-FAKE16-NEXT: ; %bb.9: ; %frem.else20
+; GFX1200-FAKE16-NEXT: ; %bb.9: ; %frem.else53
; GFX1200-FAKE16-NEXT: s_cmp_eq_f32 s10, s9
; GFX1200-FAKE16-NEXT: v_bfi_b32 v1, 0x7fff, 0, s8
; GFX1200-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -11321,7 +11321,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_branch .LBB10_16
; GFX1200-FAKE16-NEXT: .LBB10_10:
; GFX1200-FAKE16-NEXT: ; implicit-def: $vgpr1
-; GFX1200-FAKE16-NEXT: .LBB10_11: ; %frem.compute19
+; GFX1200-FAKE16-NEXT: .LBB10_11: ; %frem.compute52
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v2, s9
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v1, s10
; GFX1200-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v4, s10
@@ -11357,11 +11357,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v5
; GFX1200-FAKE16-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0
; GFX1200-FAKE16-NEXT: s_cbranch_vccnz .LBB10_15
-; GFX1200-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body27.preheader
+; GFX1200-FAKE16-NEXT: ; %bb.12: ; %frem.loop_body60.preheader
; GFX1200-FAKE16-NEXT: s_sub_co_i32 s9, s10, s9
; GFX1200-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX1200-FAKE16-NEXT: s_add_co_i32 s9, s9, 11
-; GFX1200-FAKE16-NEXT: .LBB10_13: ; %frem.loop_body27
+; GFX1200-FAKE16-NEXT: .LBB10_13: ; %frem.loop_body60
; GFX1200-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v6, v3
@@ -11385,7 +11385,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: ; %bb.14: ; %Flow129
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v5, s9
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v3, v6
-; GFX1200-FAKE16-NEXT: .LBB10_15: ; %frem.loop_exit28
+; GFX1200-FAKE16-NEXT: .LBB10_15: ; %frem.loop_exit61
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_add_nc_u32_e32 v5, -10, v5
; GFX1200-FAKE16-NEXT: v_ldexp_f32 v3, v3, v5
@@ -11415,7 +11415,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
; GFX1200-FAKE16-NEXT: s_cmp_ngt_f32 s10, s9
; GFX1200-FAKE16-NEXT: s_cbranch_scc0 .LBB10_18
-; GFX1200-FAKE16-NEXT: ; %bb.17: ; %frem.else53
+; GFX1200-FAKE16-NEXT: ; %bb.17: ; %frem.else20
; GFX1200-FAKE16-NEXT: s_cmp_eq_f32 s10, s9
; GFX1200-FAKE16-NEXT: v_bfi_b32 v2, 0x7fff, 0, s7
; GFX1200-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -11426,7 +11426,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_branch .LBB10_24
; GFX1200-FAKE16-NEXT: .LBB10_18:
; GFX1200-FAKE16-NEXT: ; implicit-def: $vgpr2
-; GFX1200-FAKE16-NEXT: .LBB10_19: ; %frem.compute52
+; GFX1200-FAKE16-NEXT: .LBB10_19: ; %frem.compute19
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v3, s9
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v2, s10
; GFX1200-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v5, s10
@@ -11462,11 +11462,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v6
; GFX1200-FAKE16-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0
; GFX1200-FAKE16-NEXT: s_cbranch_vccnz .LBB10_23
-; GFX1200-FAKE16-NEXT: ; %bb.20: ; %frem.loop_body60.preheader
+; GFX1200-FAKE16-NEXT: ; %bb.20: ; %frem.loop_body27.preheader
; GFX1200-FAKE16-NEXT: s_sub_co_i32 s9, s10, s9
; GFX1200-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX1200-FAKE16-NEXT: s_add_co_i32 s9, s9, 11
-; GFX1200-FAKE16-NEXT: .LBB10_21: ; %frem.loop_body60
+; GFX1200-FAKE16-NEXT: .LBB10_21: ; %frem.loop_body27
; GFX1200-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v7, v4
@@ -11490,7 +11490,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: ; %bb.22: ; %Flow125
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v6, s9
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v4, v7
-; GFX1200-FAKE16-NEXT: .LBB10_23: ; %frem.loop_exit61
+; GFX1200-FAKE16-NEXT: .LBB10_23: ; %frem.loop_exit28
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_add_nc_u32_e32 v6, -10, v6
; GFX1200-FAKE16-NEXT: v_ldexp_f32 v4, v4, v6
@@ -11523,7 +11523,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
; GFX1200-FAKE16-NEXT: s_cmp_ngt_f32 s12, s11
; GFX1200-FAKE16-NEXT: s_cbranch_scc0 .LBB10_26
-; GFX1200-FAKE16-NEXT: ; %bb.25: ; %frem.else86
+; GFX1200-FAKE16-NEXT: ; %bb.25: ; %frem.else
; GFX1200-FAKE16-NEXT: s_cmp_eq_f32 s12, s11
; GFX1200-FAKE16-NEXT: v_bfi_b32 v3, 0x7fff, 0, s10
; GFX1200-FAKE16-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -11534,7 +11534,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: s_branch .LBB10_32
; GFX1200-FAKE16-NEXT: .LBB10_26:
; GFX1200-FAKE16-NEXT: ; implicit-def: $vgpr3
-; GFX1200-FAKE16-NEXT: .LBB10_27: ; %frem.compute85
+; GFX1200-FAKE16-NEXT: .LBB10_27: ; %frem.compute
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v4, s11
; GFX1200-FAKE16-NEXT: v_frexp_mant_f32_e32 v3, s12
; GFX1200-FAKE16-NEXT: v_frexp_exp_i32_f32_e32 v6, s12
@@ -11570,11 +11570,11 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: v_cmp_gt_i32_e32 vcc_lo, 12, v7
; GFX1200-FAKE16-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0
; GFX1200-FAKE16-NEXT: s_cbranch_vccnz .LBB10_31
-; GFX1200-FAKE16-NEXT: ; %bb.28: ; %frem.loop_body93.preheader
+; GFX1200-FAKE16-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; GFX1200-FAKE16-NEXT: s_sub_co_i32 s11, s12, s11
; GFX1200-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX1200-FAKE16-NEXT: s_add_co_i32 s11, s11, 11
-; GFX1200-FAKE16-NEXT: .LBB10_29: ; %frem.loop_body93
+; GFX1200-FAKE16-NEXT: .LBB10_29: ; %frem.loop_body
; GFX1200-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v8, v5
@@ -11598,7 +11598,7 @@ define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-FAKE16-NEXT: ; %bb.30: ; %Flow
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v7, s11
; GFX1200-FAKE16-NEXT: v_mov_b32_e32 v5, v8
-; GFX1200-FAKE16-NEXT: .LBB10_31: ; %frem.loop_exit94
+; GFX1200-FAKE16-NEXT: .LBB10_31: ; %frem.loop_exit
; GFX1200-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-FAKE16-NEXT: v_add_nc_u32_e32 v7, -10, v7
; GFX1200-FAKE16-NEXT: v_ldexp_f32 v5, v5, v7
@@ -11686,7 +11686,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v2|
; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
; SI-NEXT: s_cbranch_vccz .LBB11_2
-; SI-NEXT: ; %bb.1: ; %frem.else
+; SI-NEXT: ; %bb.1: ; %frem.else16
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v4, s2, 0, v0
; SI-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v2|
@@ -11697,7 +11697,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: .LBB11_2:
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB11_3: ; %frem.compute
+; SI-NEXT: .LBB11_3: ; %frem.compute15
; SI-NEXT: s_mov_b32 s6, 0x7f800000
; SI-NEXT: v_cmp_lt_f32_e64 s[2:3], |v0|, s6
; SI-NEXT: v_frexp_exp_i32_f32_e32 v4, v0
@@ -11733,10 +11733,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0
; SI-NEXT: s_cmp_lt_i32 s3, 13
; SI-NEXT: s_cbranch_scc1 .LBB11_7
-; SI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; SI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; SI-NEXT: s_sub_i32 s3, s4, s5
; SI-NEXT: s_add_i32 s3, s3, 12
-; SI-NEXT: .LBB11_5: ; %frem.loop_body
+; SI-NEXT: .LBB11_5: ; %frem.loop_body23
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_mov_b32_e32 v7, v5
; SI-NEXT: v_mul_f32_e32 v5, v7, v6
@@ -11751,7 +11751,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: s_cbranch_scc1 .LBB11_5
; SI-NEXT: ; %bb.6: ; %Flow51
; SI-NEXT: v_mov_b32_e32 v5, v7
-; SI-NEXT: .LBB11_7: ; %frem.loop_exit
+; SI-NEXT: .LBB11_7: ; %frem.loop_exit24
; SI-NEXT: s_add_i32 s3, s3, -11
; SI-NEXT: v_ldexp_f32_e64 v5, v5, s3
; SI-NEXT: v_mul_f32_e32 v6, v5, v6
@@ -11767,7 +11767,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v1|, |v3|
; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
; SI-NEXT: s_cbranch_vccz .LBB11_10
-; SI-NEXT: ; %bb.9: ; %frem.else16
+; SI-NEXT: ; %bb.9: ; %frem.else
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v5, s2, 0, v1
; SI-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v3|
@@ -11778,7 +11778,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: .LBB11_10:
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB11_11: ; %frem.compute15
+; SI-NEXT: .LBB11_11: ; %frem.compute
; SI-NEXT: s_mov_b32 s6, 0x7f800000
; SI-NEXT: v_cmp_lt_f32_e64 s[2:3], |v1|, s6
; SI-NEXT: v_frexp_exp_i32_f32_e32 v5, v1
@@ -11814,10 +11814,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0
; SI-NEXT: s_cmp_lt_i32 s3, 13
; SI-NEXT: s_cbranch_scc1 .LBB11_15
-; SI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; SI-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; SI-NEXT: s_sub_i32 s3, s4, s5
; SI-NEXT: s_add_i32 s3, s3, 12
-; SI-NEXT: .LBB11_13: ; %frem.loop_body23
+; SI-NEXT: .LBB11_13: ; %frem.loop_body
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_mov_b32_e32 v8, v6
; SI-NEXT: v_mul_f32_e32 v6, v8, v7
@@ -11832,7 +11832,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: s_cbranch_scc1 .LBB11_13
; SI-NEXT: ; %bb.14: ; %Flow
; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: .LBB11_15: ; %frem.loop_exit24
+; SI-NEXT: .LBB11_15: ; %frem.loop_exit
; SI-NEXT: s_add_i32 s3, s3, -11
; SI-NEXT: v_ldexp_f32_e64 v6, v6, s3
; SI-NEXT: v_mul_f32_e32 v7, v6, v7
@@ -11877,7 +11877,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v2|
; CI-NEXT: s_and_b64 vcc, exec, s[2:3]
; CI-NEXT: s_cbranch_vccz .LBB11_2
-; CI-NEXT: ; %bb.1: ; %frem.else
+; CI-NEXT: ; %bb.1: ; %frem.else16
; CI-NEXT: s_brev_b32 s2, -2
; CI-NEXT: v_bfi_b32 v4, s2, 0, v0
; CI-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v2|
@@ -11886,7 +11886,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB11_8
; CI-NEXT: .LBB11_2:
; CI-NEXT: ; implicit-def: $vgpr4
-; CI-NEXT: .LBB11_3: ; %frem.compute
+; CI-NEXT: .LBB11_3: ; %frem.compute15
; CI-NEXT: v_frexp_mant_f32_e64 v5, |v2|
; CI-NEXT: v_ldexp_f32_e64 v5, v5, 1
; CI-NEXT: v_div_scale_f32 v11, s[2:3], v5, v5, 1.0
@@ -11911,10 +11911,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v6
; CI-NEXT: v_div_fixup_f32 v8, v8, v5, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB11_7
-; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; CI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; CI-NEXT: v_sub_i32_e32 v6, vcc, v9, v10
; CI-NEXT: v_add_i32_e32 v6, vcc, 12, v6
-; CI-NEXT: .LBB11_5: ; %frem.loop_body
+; CI-NEXT: .LBB11_5: ; %frem.loop_body23
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v9, v7
; CI-NEXT: v_mul_f32_e32 v7, v9, v8
@@ -11929,7 +11929,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB11_5
; CI-NEXT: ; %bb.6: ; %Flow51
; CI-NEXT: v_mov_b32_e32 v7, v9
-; CI-NEXT: .LBB11_7: ; %frem.loop_exit
+; CI-NEXT: .LBB11_7: ; %frem.loop_exit24
; CI-NEXT: v_add_i32_e32 v6, vcc, -11, v6
; CI-NEXT: v_ldexp_f32_e32 v6, v7, v6
; CI-NEXT: v_mul_f32_e32 v7, v6, v8
@@ -11945,7 +11945,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v1|, |v3|
; CI-NEXT: s_and_b64 vcc, exec, s[2:3]
; CI-NEXT: s_cbranch_vccz .LBB11_10
-; CI-NEXT: ; %bb.9: ; %frem.else16
+; CI-NEXT: ; %bb.9: ; %frem.else
; CI-NEXT: s_brev_b32 s2, -2
; CI-NEXT: v_bfi_b32 v5, s2, 0, v1
; CI-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v3|
@@ -11954,7 +11954,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB11_16
; CI-NEXT: .LBB11_10:
; CI-NEXT: ; implicit-def: $vgpr5
-; CI-NEXT: .LBB11_11: ; %frem.compute15
+; CI-NEXT: .LBB11_11: ; %frem.compute
; CI-NEXT: v_frexp_mant_f32_e64 v6, |v3|
; CI-NEXT: v_ldexp_f32_e64 v6, v6, 1
; CI-NEXT: v_div_scale_f32 v12, s[2:3], v6, v6, 1.0
@@ -11979,10 +11979,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v7
; CI-NEXT: v_div_fixup_f32 v9, v9, v6, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB11_15
-; CI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; CI-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; CI-NEXT: v_sub_i32_e32 v7, vcc, v10, v11
; CI-NEXT: v_add_i32_e32 v7, vcc, 12, v7
-; CI-NEXT: .LBB11_13: ; %frem.loop_body23
+; CI-NEXT: .LBB11_13: ; %frem.loop_body
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v10, v8
; CI-NEXT: v_mul_f32_e32 v8, v10, v9
@@ -11997,7 +11997,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB11_13
; CI-NEXT: ; %bb.14: ; %Flow
; CI-NEXT: v_mov_b32_e32 v8, v10
-; CI-NEXT: .LBB11_15: ; %frem.loop_exit24
+; CI-NEXT: .LBB11_15: ; %frem.loop_exit
; CI-NEXT: v_add_i32_e32 v7, vcc, -11, v7
; CI-NEXT: v_ldexp_f32_e32 v7, v8, v7
; CI-NEXT: v_mul_f32_e32 v8, v7, v9
@@ -12042,7 +12042,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v2|
; VI-NEXT: s_and_b64 vcc, exec, s[2:3]
; VI-NEXT: s_cbranch_vccz .LBB11_2
-; VI-NEXT: ; %bb.1: ; %frem.else
+; VI-NEXT: ; %bb.1: ; %frem.else16
; VI-NEXT: s_brev_b32 s2, -2
; VI-NEXT: v_bfi_b32 v4, s2, 0, v0
; VI-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v2|
@@ -12051,7 +12051,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB11_8
; VI-NEXT: .LBB11_2:
; VI-NEXT: ; implicit-def: $vgpr4
-; VI-NEXT: .LBB11_3: ; %frem.compute
+; VI-NEXT: .LBB11_3: ; %frem.compute15
; VI-NEXT: v_frexp_mant_f32_e64 v5, |v2|
; VI-NEXT: v_ldexp_f32 v5, v5, 1
; VI-NEXT: v_div_scale_f32 v11, s[2:3], v5, v5, 1.0
@@ -12076,10 +12076,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v6
; VI-NEXT: v_div_fixup_f32 v8, v8, v5, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB11_7
-; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; VI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; VI-NEXT: v_sub_u32_e32 v6, vcc, v9, v10
; VI-NEXT: v_add_u32_e32 v6, vcc, 12, v6
-; VI-NEXT: .LBB11_5: ; %frem.loop_body
+; VI-NEXT: .LBB11_5: ; %frem.loop_body23
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v7
; VI-NEXT: v_mul_f32_e32 v7, v9, v8
@@ -12094,7 +12094,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB11_5
; VI-NEXT: ; %bb.6: ; %Flow51
; VI-NEXT: v_mov_b32_e32 v7, v9
-; VI-NEXT: .LBB11_7: ; %frem.loop_exit
+; VI-NEXT: .LBB11_7: ; %frem.loop_exit24
; VI-NEXT: v_add_u32_e32 v6, vcc, -11, v6
; VI-NEXT: v_ldexp_f32 v6, v7, v6
; VI-NEXT: v_mul_f32_e32 v7, v6, v8
@@ -12110,7 +12110,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v1|, |v3|
; VI-NEXT: s_and_b64 vcc, exec, s[2:3]
; VI-NEXT: s_cbranch_vccz .LBB11_10
-; VI-NEXT: ; %bb.9: ; %frem.else16
+; VI-NEXT: ; %bb.9: ; %frem.else
; VI-NEXT: s_brev_b32 s2, -2
; VI-NEXT: v_bfi_b32 v5, s2, 0, v1
; VI-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v3|
@@ -12119,7 +12119,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB11_16
; VI-NEXT: .LBB11_10:
; VI-NEXT: ; implicit-def: $vgpr5
-; VI-NEXT: .LBB11_11: ; %frem.compute15
+; VI-NEXT: .LBB11_11: ; %frem.compute
; VI-NEXT: v_frexp_mant_f32_e64 v6, |v3|
; VI-NEXT: v_ldexp_f32 v6, v6, 1
; VI-NEXT: v_div_scale_f32 v12, s[2:3], v6, v6, 1.0
@@ -12144,10 +12144,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v7
; VI-NEXT: v_div_fixup_f32 v9, v9, v6, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB11_15
-; VI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; VI-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; VI-NEXT: v_sub_u32_e32 v7, vcc, v10, v11
; VI-NEXT: v_add_u32_e32 v7, vcc, 12, v7
-; VI-NEXT: .LBB11_13: ; %frem.loop_body23
+; VI-NEXT: .LBB11_13: ; %frem.loop_body
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v10, v8
; VI-NEXT: v_mul_f32_e32 v8, v10, v9
@@ -12162,7 +12162,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB11_13
; VI-NEXT: ; %bb.14: ; %Flow
; VI-NEXT: v_mov_b32_e32 v8, v10
-; VI-NEXT: .LBB11_15: ; %frem.loop_exit24
+; VI-NEXT: .LBB11_15: ; %frem.loop_exit
; VI-NEXT: v_add_u32_e32 v7, vcc, -11, v7
; VI-NEXT: v_ldexp_f32 v7, v8, v7
; VI-NEXT: v_mul_f32_e32 v8, v7, v9
@@ -12202,7 +12202,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v2|
; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3]
; GFX9-NEXT: s_cbranch_vccz .LBB11_2
-; GFX9-NEXT: ; %bb.1: ; %frem.else
+; GFX9-NEXT: ; %bb.1: ; %frem.else16
; GFX9-NEXT: s_brev_b32 s2, -2
; GFX9-NEXT: v_bfi_b32 v4, s2, 0, v0
; GFX9-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v2|
@@ -12211,7 +12211,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_branch .LBB11_8
; GFX9-NEXT: .LBB11_2:
; GFX9-NEXT: ; implicit-def: $vgpr4
-; GFX9-NEXT: .LBB11_3: ; %frem.compute
+; GFX9-NEXT: .LBB11_3: ; %frem.compute15
; GFX9-NEXT: v_frexp_mant_f32_e64 v5, |v2|
; GFX9-NEXT: v_ldexp_f32 v5, v5, 1
; GFX9-NEXT: v_div_scale_f32 v11, s[2:3], v5, v5, 1.0
@@ -12236,10 +12236,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 13, v6
; GFX9-NEXT: v_div_fixup_f32 v8, v8, v5, 1.0
; GFX9-NEXT: s_cbranch_vccnz .LBB11_7
-; GFX9-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX9-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; GFX9-NEXT: v_sub_u32_e32 v6, v9, v10
; GFX9-NEXT: v_add_u32_e32 v6, 12, v6
-; GFX9-NEXT: .LBB11_5: ; %frem.loop_body
+; GFX9-NEXT: .LBB11_5: ; %frem.loop_body23
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v9, v7
; GFX9-NEXT: v_mul_f32_e32 v7, v9, v8
@@ -12254,7 +12254,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_cbranch_vccnz .LBB11_5
; GFX9-NEXT: ; %bb.6: ; %Flow51
; GFX9-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-NEXT: .LBB11_7: ; %frem.loop_exit
+; GFX9-NEXT: .LBB11_7: ; %frem.loop_exit24
; GFX9-NEXT: v_add_u32_e32 v6, -11, v6
; GFX9-NEXT: v_ldexp_f32 v6, v7, v6
; GFX9-NEXT: v_mul_f32_e32 v7, v6, v8
@@ -12270,7 +12270,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v1|, |v3|
; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3]
; GFX9-NEXT: s_cbranch_vccz .LBB11_10
-; GFX9-NEXT: ; %bb.9: ; %frem.else16
+; GFX9-NEXT: ; %bb.9: ; %frem.else
; GFX9-NEXT: s_brev_b32 s2, -2
; GFX9-NEXT: v_bfi_b32 v5, s2, 0, v1
; GFX9-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v3|
@@ -12279,7 +12279,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_branch .LBB11_16
; GFX9-NEXT: .LBB11_10:
; GFX9-NEXT: ; implicit-def: $vgpr5
-; GFX9-NEXT: .LBB11_11: ; %frem.compute15
+; GFX9-NEXT: .LBB11_11: ; %frem.compute
; GFX9-NEXT: v_frexp_mant_f32_e64 v6, |v3|
; GFX9-NEXT: v_ldexp_f32 v6, v6, 1
; GFX9-NEXT: v_div_scale_f32 v12, s[2:3], v6, v6, 1.0
@@ -12304,10 +12304,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 13, v7
; GFX9-NEXT: v_div_fixup_f32 v9, v9, v6, 1.0
; GFX9-NEXT: s_cbranch_vccnz .LBB11_15
-; GFX9-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; GFX9-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; GFX9-NEXT: v_sub_u32_e32 v7, v10, v11
; GFX9-NEXT: v_add_u32_e32 v7, 12, v7
-; GFX9-NEXT: .LBB11_13: ; %frem.loop_body23
+; GFX9-NEXT: .LBB11_13: ; %frem.loop_body
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v10, v8
; GFX9-NEXT: v_mul_f32_e32 v8, v10, v9
@@ -12322,7 +12322,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX9-NEXT: s_cbranch_vccnz .LBB11_13
; GFX9-NEXT: ; %bb.14: ; %Flow
; GFX9-NEXT: v_mov_b32_e32 v8, v10
-; GFX9-NEXT: .LBB11_15: ; %frem.loop_exit24
+; GFX9-NEXT: .LBB11_15: ; %frem.loop_exit
; GFX9-NEXT: v_add_u32_e32 v7, -11, v7
; GFX9-NEXT: v_ldexp_f32 v7, v8, v7
; GFX9-NEXT: v_mul_f32_e32 v8, v7, v9
@@ -12363,7 +12363,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_ngt_f32_e64 s2, |v0|, |v2|
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
; GFX10-NEXT: s_cbranch_vccz .LBB11_2
-; GFX10-NEXT: ; %bb.1: ; %frem.else
+; GFX10-NEXT: ; %bb.1: ; %frem.else16
; GFX10-NEXT: v_bfi_b32 v4, 0x7fffffff, 0, v0
; GFX10-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v0|, |v2|
; GFX10-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc_lo
@@ -12371,7 +12371,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: s_branch .LBB11_8
; GFX10-NEXT: .LBB11_2:
; GFX10-NEXT: ; implicit-def: $vgpr4
-; GFX10-NEXT: .LBB11_3: ; %frem.compute
+; GFX10-NEXT: .LBB11_3: ; %frem.compute15
; GFX10-NEXT: v_frexp_mant_f32_e64 v5, |v2|
; GFX10-NEXT: v_frexp_mant_f32_e64 v4, |v0|
; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v7, v0
@@ -12398,10 +12398,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v8
; GFX10-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0
; GFX10-NEXT: s_cbranch_vccnz .LBB11_7
-; GFX10-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX10-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; GFX10-NEXT: s_sub_i32 s2, s2, s3
; GFX10-NEXT: s_add_i32 s2, s2, 12
-; GFX10-NEXT: .LBB11_5: ; %frem.loop_body
+; GFX10-NEXT: .LBB11_5: ; %frem.loop_body23
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_mov_b32_e32 v9, v6
; GFX10-NEXT: s_add_i32 s2, s2, -12
@@ -12417,7 +12417,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: ; %bb.6: ; %Flow51
; GFX10-NEXT: v_mov_b32_e32 v8, s2
; GFX10-NEXT: v_mov_b32_e32 v6, v9
-; GFX10-NEXT: .LBB11_7: ; %frem.loop_exit
+; GFX10-NEXT: .LBB11_7: ; %frem.loop_exit24
; GFX10-NEXT: v_add_nc_u32_e32 v8, -11, v8
; GFX10-NEXT: v_ldexp_f32 v6, v6, v8
; GFX10-NEXT: v_mul_f32_e32 v7, v6, v7
@@ -12432,7 +12432,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_ngt_f32_e64 s2, |v1|, |v3|
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
; GFX10-NEXT: s_cbranch_vccz .LBB11_10
-; GFX10-NEXT: ; %bb.9: ; %frem.else16
+; GFX10-NEXT: ; %bb.9: ; %frem.else
; GFX10-NEXT: v_bfi_b32 v5, 0x7fffffff, 0, v1
; GFX10-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v1|, |v3|
; GFX10-NEXT: v_cndmask_b32_e32 v5, v1, v5, vcc_lo
@@ -12440,7 +12440,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: s_branch .LBB11_16
; GFX10-NEXT: .LBB11_10:
; GFX10-NEXT: ; implicit-def: $vgpr5
-; GFX10-NEXT: .LBB11_11: ; %frem.compute15
+; GFX10-NEXT: .LBB11_11: ; %frem.compute
; GFX10-NEXT: v_frexp_mant_f32_e64 v6, |v3|
; GFX10-NEXT: v_frexp_mant_f32_e64 v5, |v1|
; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v8, v1
@@ -12467,10 +12467,10 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v9
; GFX10-NEXT: v_div_fixup_f32 v8, v8, v6, 1.0
; GFX10-NEXT: s_cbranch_vccnz .LBB11_15
-; GFX10-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; GFX10-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; GFX10-NEXT: s_sub_i32 s2, s2, s3
; GFX10-NEXT: s_add_i32 s2, s2, 12
-; GFX10-NEXT: .LBB11_13: ; %frem.loop_body23
+; GFX10-NEXT: .LBB11_13: ; %frem.loop_body
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_mov_b32_e32 v10, v7
; GFX10-NEXT: s_add_i32 s2, s2, -12
@@ -12486,7 +12486,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX10-NEXT: ; %bb.14: ; %Flow
; GFX10-NEXT: v_mov_b32_e32 v9, s2
; GFX10-NEXT: v_mov_b32_e32 v7, v10
-; GFX10-NEXT: .LBB11_15: ; %frem.loop_exit24
+; GFX10-NEXT: .LBB11_15: ; %frem.loop_exit
; GFX10-NEXT: v_add_nc_u32_e32 v9, -11, v9
; GFX10-NEXT: v_ldexp_f32 v7, v7, v9
; GFX10-NEXT: v_mul_f32_e32 v8, v7, v8
@@ -12524,7 +12524,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: v_cmp_ngt_f32_e64 s2, |v0|, |v2|
; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccz .LBB11_2
-; GFX11-NEXT: ; %bb.1: ; %frem.else
+; GFX11-NEXT: ; %bb.1: ; %frem.else16
; GFX11-NEXT: v_bfi_b32 v4, 0x7fffffff, 0, v0
; GFX11-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v0|, |v2|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
@@ -12533,7 +12533,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: s_branch .LBB11_8
; GFX11-NEXT: .LBB11_2:
; GFX11-NEXT: ; implicit-def: $vgpr4
-; GFX11-NEXT: .LBB11_3: ; %frem.compute
+; GFX11-NEXT: .LBB11_3: ; %frem.compute15
; GFX11-NEXT: v_frexp_mant_f32_e64 v5, |v2|
; GFX11-NEXT: v_frexp_mant_f32_e64 v4, |v0|
; GFX11-NEXT: v_frexp_exp_i32_f32_e32 v7, v0
@@ -12569,11 +12569,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_div_fixup_f32 v7, v7, v5, 1.0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_7
-; GFX11-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX11-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; GFX11-NEXT: s_sub_i32 s2, s2, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_add_i32 s2, s2, 12
-; GFX11-NEXT: .LBB11_5: ; %frem.loop_body
+; GFX11-NEXT: .LBB11_5: ; %frem.loop_body23
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v9, v6
@@ -12593,7 +12593,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: ; %bb.6: ; %Flow51
; GFX11-NEXT: v_mov_b32_e32 v8, s2
; GFX11-NEXT: v_mov_b32_e32 v6, v9
-; GFX11-NEXT: .LBB11_7: ; %frem.loop_exit
+; GFX11-NEXT: .LBB11_7: ; %frem.loop_exit24
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_nc_u32_e32 v8, -11, v8
; GFX11-NEXT: v_ldexp_f32 v6, v6, v8
@@ -12613,7 +12613,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: v_cmp_ngt_f32_e64 s2, |v1|, |v3|
; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccz .LBB11_10
-; GFX11-NEXT: ; %bb.9: ; %frem.else16
+; GFX11-NEXT: ; %bb.9: ; %frem.else
; GFX11-NEXT: v_bfi_b32 v5, 0x7fffffff, 0, v1
; GFX11-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v1|, |v3|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
@@ -12622,7 +12622,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: s_branch .LBB11_16
; GFX11-NEXT: .LBB11_10:
; GFX11-NEXT: ; implicit-def: $vgpr5
-; GFX11-NEXT: .LBB11_11: ; %frem.compute15
+; GFX11-NEXT: .LBB11_11: ; %frem.compute
; GFX11-NEXT: v_frexp_mant_f32_e64 v6, |v3|
; GFX11-NEXT: v_frexp_mant_f32_e64 v5, |v1|
; GFX11-NEXT: v_frexp_exp_i32_f32_e32 v8, v1
@@ -12658,11 +12658,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_div_fixup_f32 v8, v8, v6, 1.0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_15
-; GFX11-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; GFX11-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; GFX11-NEXT: s_sub_i32 s2, s2, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_add_i32 s2, s2, 12
-; GFX11-NEXT: .LBB11_13: ; %frem.loop_body23
+; GFX11-NEXT: .LBB11_13: ; %frem.loop_body
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v10, v7
@@ -12682,7 +12682,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX11-NEXT: ; %bb.14: ; %Flow
; GFX11-NEXT: v_mov_b32_e32 v9, s2
; GFX11-NEXT: v_mov_b32_e32 v7, v10
-; GFX11-NEXT: .LBB11_15: ; %frem.loop_exit24
+; GFX11-NEXT: .LBB11_15: ; %frem.loop_exit
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_nc_u32_e32 v9, -11, v9
; GFX11-NEXT: v_ldexp_f32 v7, v7, v9
@@ -12730,7 +12730,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-NEXT: s_cmp_ngt_f32 s3, s8
; GFX1150-NEXT: s_cbranch_scc0 .LBB11_2
-; GFX1150-NEXT: ; %bb.1: ; %frem.else
+; GFX1150-NEXT: ; %bb.1: ; %frem.else16
; GFX1150-NEXT: s_cmp_eq_f32 s3, s8
; GFX1150-NEXT: v_bfi_b32 v0, 0x7fffffff, 0, s6
; GFX1150-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -12740,7 +12740,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: s_branch .LBB11_8
; GFX1150-NEXT: .LBB11_2:
; GFX1150-NEXT: ; implicit-def: $vgpr0
-; GFX1150-NEXT: .LBB11_3: ; %frem.compute
+; GFX1150-NEXT: .LBB11_3: ; %frem.compute15
; GFX1150-NEXT: v_frexp_mant_f32_e64 v1, |s4|
; GFX1150-NEXT: v_frexp_mant_f32_e64 v0, |s6|
; GFX1150-NEXT: v_frexp_exp_i32_f32_e32 v3, s6
@@ -12775,11 +12775,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v4
; GFX1150-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0
; GFX1150-NEXT: s_cbranch_vccnz .LBB11_7
-; GFX1150-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX1150-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; GFX1150-NEXT: s_sub_i32 s7, s7, s8
; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-NEXT: s_add_i32 s7, s7, 12
-; GFX1150-NEXT: .LBB11_5: ; %frem.loop_body
+; GFX1150-NEXT: .LBB11_5: ; %frem.loop_body23
; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-NEXT: v_mov_b32_e32 v5, v2
@@ -12801,7 +12801,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: ; %bb.6: ; %Flow51
; GFX1150-NEXT: v_mov_b32_e32 v4, s7
; GFX1150-NEXT: v_mov_b32_e32 v2, v5
-; GFX1150-NEXT: .LBB11_7: ; %frem.loop_exit
+; GFX1150-NEXT: .LBB11_7: ; %frem.loop_exit24
; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-NEXT: v_add_nc_u32_e32 v4, -11, v4
; GFX1150-NEXT: v_ldexp_f32 v2, v2, v4
@@ -12824,7 +12824,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-NEXT: s_cmp_ngt_f32 s6, s8
; GFX1150-NEXT: s_cbranch_scc0 .LBB11_10
-; GFX1150-NEXT: ; %bb.9: ; %frem.else16
+; GFX1150-NEXT: ; %bb.9: ; %frem.else
; GFX1150-NEXT: s_cmp_eq_f32 s6, s8
; GFX1150-NEXT: v_bfi_b32 v1, 0x7fffffff, 0, s5
; GFX1150-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -12834,7 +12834,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: s_branch .LBB11_16
; GFX1150-NEXT: .LBB11_10:
; GFX1150-NEXT: ; implicit-def: $vgpr1
-; GFX1150-NEXT: .LBB11_11: ; %frem.compute15
+; GFX1150-NEXT: .LBB11_11: ; %frem.compute
; GFX1150-NEXT: v_frexp_mant_f32_e64 v2, |s2|
; GFX1150-NEXT: v_frexp_mant_f32_e64 v1, |s5|
; GFX1150-NEXT: v_frexp_exp_i32_f32_e32 v4, s5
@@ -12869,11 +12869,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v5
; GFX1150-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0
; GFX1150-NEXT: s_cbranch_vccnz .LBB11_15
-; GFX1150-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; GFX1150-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; GFX1150-NEXT: s_sub_i32 s7, s7, s8
; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1150-NEXT: s_add_i32 s7, s7, 12
-; GFX1150-NEXT: .LBB11_13: ; %frem.loop_body23
+; GFX1150-NEXT: .LBB11_13: ; %frem.loop_body
; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1150-NEXT: v_mov_b32_e32 v6, v3
@@ -12895,7 +12895,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1150-NEXT: ; %bb.14: ; %Flow
; GFX1150-NEXT: v_mov_b32_e32 v5, s7
; GFX1150-NEXT: v_mov_b32_e32 v3, v6
-; GFX1150-NEXT: .LBB11_15: ; %frem.loop_exit24
+; GFX1150-NEXT: .LBB11_15: ; %frem.loop_exit
; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1150-NEXT: v_add_nc_u32_e32 v5, -11, v5
; GFX1150-NEXT: v_ldexp_f32 v3, v3, v5
@@ -12950,7 +12950,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1200-NEXT: s_cmp_ngt_f32 s3, s8
; GFX1200-NEXT: s_cbranch_scc0 .LBB11_2
-; GFX1200-NEXT: ; %bb.1: ; %frem.else
+; GFX1200-NEXT: ; %bb.1: ; %frem.else16
; GFX1200-NEXT: s_cmp_eq_f32 s3, s8
; GFX1200-NEXT: v_bfi_b32 v0, 0x7fffffff, 0, s6
; GFX1200-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -12960,7 +12960,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: s_branch .LBB11_8
; GFX1200-NEXT: .LBB11_2:
; GFX1200-NEXT: ; implicit-def: $vgpr0
-; GFX1200-NEXT: .LBB11_3: ; %frem.compute
+; GFX1200-NEXT: .LBB11_3: ; %frem.compute15
; GFX1200-NEXT: v_frexp_mant_f32_e64 v1, |s4|
; GFX1200-NEXT: v_frexp_mant_f32_e64 v0, |s6|
; GFX1200-NEXT: v_frexp_exp_i32_f32_e32 v3, s6
@@ -12996,11 +12996,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v4
; GFX1200-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0
; GFX1200-NEXT: s_cbranch_vccnz .LBB11_7
-; GFX1200-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; GFX1200-NEXT: ; %bb.4: ; %frem.loop_body23.preheader
; GFX1200-NEXT: s_sub_co_i32 s7, s7, s8
; GFX1200-NEXT: s_wait_alu 0xfffe
; GFX1200-NEXT: s_add_co_i32 s7, s7, 12
-; GFX1200-NEXT: .LBB11_5: ; %frem.loop_body
+; GFX1200-NEXT: .LBB11_5: ; %frem.loop_body23
; GFX1200-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-NEXT: v_mov_b32_e32 v5, v2
@@ -13024,7 +13024,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: ; %bb.6: ; %Flow51
; GFX1200-NEXT: v_mov_b32_e32 v4, s7
; GFX1200-NEXT: v_mov_b32_e32 v2, v5
-; GFX1200-NEXT: .LBB11_7: ; %frem.loop_exit
+; GFX1200-NEXT: .LBB11_7: ; %frem.loop_exit24
; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-NEXT: v_add_nc_u32_e32 v4, -11, v4
; GFX1200-NEXT: v_ldexp_f32 v2, v2, v4
@@ -13048,7 +13048,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: s_wait_alu 0xfffe
; GFX1200-NEXT: s_cmp_ngt_f32 s6, s8
; GFX1200-NEXT: s_cbranch_scc0 .LBB11_10
-; GFX1200-NEXT: ; %bb.9: ; %frem.else16
+; GFX1200-NEXT: ; %bb.9: ; %frem.else
; GFX1200-NEXT: s_cmp_eq_f32 s6, s8
; GFX1200-NEXT: v_bfi_b32 v1, 0x7fffffff, 0, s5
; GFX1200-NEXT: s_cselect_b32 vcc_lo, -1, 0
@@ -13059,7 +13059,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: s_branch .LBB11_16
; GFX1200-NEXT: .LBB11_10:
; GFX1200-NEXT: ; implicit-def: $vgpr1
-; GFX1200-NEXT: .LBB11_11: ; %frem.compute15
+; GFX1200-NEXT: .LBB11_11: ; %frem.compute
; GFX1200-NEXT: v_frexp_mant_f32_e64 v2, |s2|
; GFX1200-NEXT: v_frexp_mant_f32_e64 v1, |s5|
; GFX1200-NEXT: v_frexp_exp_i32_f32_e32 v4, s5
@@ -13095,11 +13095,11 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v5
; GFX1200-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0
; GFX1200-NEXT: s_cbranch_vccnz .LBB11_15
-; GFX1200-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; GFX1200-NEXT: ; %bb.12: ; %frem.loop_body.preheader
; GFX1200-NEXT: s_sub_co_i32 s7, s7, s8
; GFX1200-NEXT: s_wait_alu 0xfffe
; GFX1200-NEXT: s_add_co_i32 s7, s7, 12
-; GFX1200-NEXT: .LBB11_13: ; %frem.loop_body23
+; GFX1200-NEXT: .LBB11_13: ; %frem.loop_body
; GFX1200-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1200-NEXT: v_mov_b32_e32 v6, v3
@@ -13123,7 +13123,7 @@ define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; GFX1200-NEXT: ; %bb.14: ; %Flow
; GFX1200-NEXT: v_mov_b32_e32 v5, s7
; GFX1200-NEXT: v_mov_b32_e32 v3, v6
-; GFX1200-NEXT: .LBB11_15: ; %frem.loop_exit24
+; GFX1200-NEXT: .LBB11_15: ; %frem.loop_exit
; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1200-NEXT: v_add_nc_u32_e32 v5, -11, v5
; GFX1200-NEXT: v_ldexp_f32 v3, v3, v5
@@ -13187,7 +13187,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v4|
; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
; SI-NEXT: s_cbranch_vccz .LBB12_2
-; SI-NEXT: ; %bb.1: ; %frem.else
+; SI-NEXT: ; %bb.1: ; %frem.else78
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v8, s2, 0, v0
; SI-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v4|
@@ -13198,7 +13198,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: .LBB12_2:
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB12_3: ; %frem.compute
+; SI-NEXT: .LBB12_3: ; %frem.compute77
; SI-NEXT: s_mov_b32 s6, 0x7f800000
; SI-NEXT: v_cmp_lt_f32_e64 s[2:3], |v0|, s6
; SI-NEXT: v_frexp_exp_i32_f32_e32 v8, v0
@@ -13234,10 +13234,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_div_fixup_f32 v10, v10, v8, 1.0
; SI-NEXT: s_cmp_lt_i32 s3, 13
; SI-NEXT: s_cbranch_scc1 .LBB12_7
-; SI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; SI-NEXT: ; %bb.4: ; %frem.loop_body85.preheader
; SI-NEXT: s_sub_i32 s3, s4, s5
; SI-NEXT: s_add_i32 s3, s3, 12
-; SI-NEXT: .LBB12_5: ; %frem.loop_body
+; SI-NEXT: .LBB12_5: ; %frem.loop_body85
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_mov_b32_e32 v11, v9
; SI-NEXT: v_mul_f32_e32 v9, v11, v10
@@ -13252,7 +13252,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: s_cbranch_scc1 .LBB12_5
; SI-NEXT: ; %bb.6: ; %Flow125
; SI-NEXT: v_mov_b32_e32 v9, v11
-; SI-NEXT: .LBB12_7: ; %frem.loop_exit
+; SI-NEXT: .LBB12_7: ; %frem.loop_exit86
; SI-NEXT: s_add_i32 s3, s3, -11
; SI-NEXT: v_ldexp_f32_e64 v9, v9, s3
; SI-NEXT: v_mul_f32_e32 v10, v9, v10
@@ -13268,7 +13268,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v1|, |v5|
; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
; SI-NEXT: s_cbranch_vccz .LBB12_10
-; SI-NEXT: ; %bb.9: ; %frem.else16
+; SI-NEXT: ; %bb.9: ; %frem.else47
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v9, s2, 0, v1
; SI-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v5|
@@ -13279,7 +13279,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: .LBB12_10:
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB12_11: ; %frem.compute15
+; SI-NEXT: .LBB12_11: ; %frem.compute46
; SI-NEXT: s_mov_b32 s6, 0x7f800000
; SI-NEXT: v_cmp_lt_f32_e64 s[2:3], |v1|, s6
; SI-NEXT: v_frexp_exp_i32_f32_e32 v9, v1
@@ -13315,10 +13315,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_div_fixup_f32 v11, v11, v9, 1.0
; SI-NEXT: s_cmp_lt_i32 s3, 13
; SI-NEXT: s_cbranch_scc1 .LBB12_15
-; SI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; SI-NEXT: ; %bb.12: ; %frem.loop_body54.preheader
; SI-NEXT: s_sub_i32 s3, s4, s5
; SI-NEXT: s_add_i32 s3, s3, 12
-; SI-NEXT: .LBB12_13: ; %frem.loop_body23
+; SI-NEXT: .LBB12_13: ; %frem.loop_body54
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_mov_b32_e32 v12, v10
; SI-NEXT: v_mul_f32_e32 v10, v12, v11
@@ -13333,7 +13333,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: s_cbranch_scc1 .LBB12_13
; SI-NEXT: ; %bb.14: ; %Flow121
; SI-NEXT: v_mov_b32_e32 v10, v12
-; SI-NEXT: .LBB12_15: ; %frem.loop_exit24
+; SI-NEXT: .LBB12_15: ; %frem.loop_exit55
; SI-NEXT: s_add_i32 s3, s3, -11
; SI-NEXT: v_ldexp_f32_e64 v10, v10, s3
; SI-NEXT: v_mul_f32_e32 v11, v10, v11
@@ -13349,7 +13349,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v2|, |v6|
; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
; SI-NEXT: s_cbranch_vccz .LBB12_18
-; SI-NEXT: ; %bb.17: ; %frem.else47
+; SI-NEXT: ; %bb.17: ; %frem.else16
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v10, s2, 0, v2
; SI-NEXT: v_cmp_eq_f32_e64 vcc, |v2|, |v6|
@@ -13360,7 +13360,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: .LBB12_18:
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB12_19: ; %frem.compute46
+; SI-NEXT: .LBB12_19: ; %frem.compute15
; SI-NEXT: s_mov_b32 s6, 0x7f800000
; SI-NEXT: v_cmp_lt_f32_e64 s[2:3], |v2|, s6
; SI-NEXT: v_frexp_exp_i32_f32_e32 v10, v2
@@ -13396,10 +13396,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_div_fixup_f32 v12, v12, v10, 1.0
; SI-NEXT: s_cmp_lt_i32 s3, 13
; SI-NEXT: s_cbranch_scc1 .LBB12_23
-; SI-NEXT: ; %bb.20: ; %frem.loop_body54.preheader
+; SI-NEXT: ; %bb.20: ; %frem.loop_body23.preheader
; SI-NEXT: s_sub_i32 s3, s4, s5
; SI-NEXT: s_add_i32 s3, s3, 12
-; SI-NEXT: .LBB12_21: ; %frem.loop_body54
+; SI-NEXT: .LBB12_21: ; %frem.loop_body23
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_mov_b32_e32 v13, v11
; SI-NEXT: v_mul_f32_e32 v11, v13, v12
@@ -13414,7 +13414,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: s_cbranch_scc1 .LBB12_21
; SI-NEXT: ; %bb.22: ; %Flow117
; SI-NEXT: v_mov_b32_e32 v11, v13
-; SI-NEXT: .LBB12_23: ; %frem.loop_exit55
+; SI-NEXT: .LBB12_23: ; %frem.loop_exit24
; SI-NEXT: s_add_i32 s3, s3, -11
; SI-NEXT: v_ldexp_f32_e64 v11, v11, s3
; SI-NEXT: v_mul_f32_e32 v12, v11, v12
@@ -13430,7 +13430,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v3|, |v7|
; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
; SI-NEXT: s_cbranch_vccz .LBB12_26
-; SI-NEXT: ; %bb.25: ; %frem.else78
+; SI-NEXT: ; %bb.25: ; %frem.else
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v11, s2, 0, v3
; SI-NEXT: v_cmp_eq_f32_e64 vcc, |v3|, |v7|
@@ -13441,7 +13441,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: .LBB12_26:
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: .LBB12_27: ; %frem.compute77
+; SI-NEXT: .LBB12_27: ; %frem.compute
; SI-NEXT: s_mov_b32 s6, 0x7f800000
; SI-NEXT: v_cmp_lt_f32_e64 s[2:3], |v3|, s6
; SI-NEXT: v_frexp_exp_i32_f32_e32 v11, v3
@@ -13477,10 +13477,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: v_div_fixup_f32 v13, v13, v11, 1.0
; SI-NEXT: s_cmp_lt_i32 s3, 13
; SI-NEXT: s_cbranch_scc1 .LBB12_31
-; SI-NEXT: ; %bb.28: ; %frem.loop_body85.preheader
+; SI-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; SI-NEXT: s_sub_i32 s3, s4, s5
; SI-NEXT: s_add_i32 s3, s3, 12
-; SI-NEXT: .LBB12_29: ; %frem.loop_body85
+; SI-NEXT: .LBB12_29: ; %frem.loop_body
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_mov_b32_e32 v14, v12
; SI-NEXT: v_mul_f32_e32 v12, v14, v13
@@ -13495,7 +13495,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; SI-NEXT: s_cbranch_scc1 .LBB12_29
; SI-NEXT: ; %bb.30: ; %Flow
; SI-NEXT: v_mov_b32_e32 v12, v14
-; SI-NEXT: .LBB12_31: ; %frem.loop_exit86
+; SI-NEXT: .LBB12_31: ; %frem.loop_exit
; SI-NEXT: s_add_i32 s3, s3, -11
; SI-NEXT: v_ldexp_f32_e64 v12, v12, s3
; SI-NEXT: v_mul_f32_e32 v13, v12, v13
@@ -13548,7 +13548,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v4|
; CI-NEXT: s_and_b64 vcc, exec, s[2:3]
; CI-NEXT: s_cbranch_vccz .LBB12_2
-; CI-NEXT: ; %bb.1: ; %frem.else
+; CI-NEXT: ; %bb.1: ; %frem.else78
; CI-NEXT: s_brev_b32 s2, -2
; CI-NEXT: v_bfi_b32 v8, s2, 0, v0
; CI-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v4|
@@ -13557,7 +13557,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB12_8
; CI-NEXT: .LBB12_2:
; CI-NEXT: ; implicit-def: $vgpr8
-; CI-NEXT: .LBB12_3: ; %frem.compute
+; CI-NEXT: .LBB12_3: ; %frem.compute77
; CI-NEXT: v_frexp_mant_f32_e64 v9, |v4|
; CI-NEXT: v_ldexp_f32_e64 v9, v9, 1
; CI-NEXT: v_div_scale_f32 v15, s[2:3], v9, v9, 1.0
@@ -13582,10 +13582,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v10
; CI-NEXT: v_div_fixup_f32 v12, v12, v9, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB12_7
-; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; CI-NEXT: ; %bb.4: ; %frem.loop_body85.preheader
; CI-NEXT: v_sub_i32_e32 v10, vcc, v13, v14
; CI-NEXT: v_add_i32_e32 v10, vcc, 12, v10
-; CI-NEXT: .LBB12_5: ; %frem.loop_body
+; CI-NEXT: .LBB12_5: ; %frem.loop_body85
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v13, v11
; CI-NEXT: v_mul_f32_e32 v11, v13, v12
@@ -13600,7 +13600,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB12_5
; CI-NEXT: ; %bb.6: ; %Flow125
; CI-NEXT: v_mov_b32_e32 v11, v13
-; CI-NEXT: .LBB12_7: ; %frem.loop_exit
+; CI-NEXT: .LBB12_7: ; %frem.loop_exit86
; CI-NEXT: v_add_i32_e32 v10, vcc, -11, v10
; CI-NEXT: v_ldexp_f32_e32 v10, v11, v10
; CI-NEXT: v_mul_f32_e32 v11, v10, v12
@@ -13616,7 +13616,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v1|, |v5|
; CI-NEXT: s_and_b64 vcc, exec, s[2:3]
; CI-NEXT: s_cbranch_vccz .LBB12_10
-; CI-NEXT: ; %bb.9: ; %frem.else16
+; CI-NEXT: ; %bb.9: ; %frem.else47
; CI-NEXT: s_brev_b32 s2, -2
; CI-NEXT: v_bfi_b32 v9, s2, 0, v1
; CI-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v5|
@@ -13625,7 +13625,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB12_16
; CI-NEXT: .LBB12_10:
; CI-NEXT: ; implicit-def: $vgpr9
-; CI-NEXT: .LBB12_11: ; %frem.compute15
+; CI-NEXT: .LBB12_11: ; %frem.compute46
; CI-NEXT: v_frexp_mant_f32_e64 v10, |v5|
; CI-NEXT: v_ldexp_f32_e64 v10, v10, 1
; CI-NEXT: v_div_scale_f32 v16, s[2:3], v10, v10, 1.0
@@ -13650,10 +13650,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v11
; CI-NEXT: v_div_fixup_f32 v13, v13, v10, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB12_15
-; CI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; CI-NEXT: ; %bb.12: ; %frem.loop_body54.preheader
; CI-NEXT: v_sub_i32_e32 v11, vcc, v14, v15
; CI-NEXT: v_add_i32_e32 v11, vcc, 12, v11
-; CI-NEXT: .LBB12_13: ; %frem.loop_body23
+; CI-NEXT: .LBB12_13: ; %frem.loop_body54
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v14, v12
; CI-NEXT: v_mul_f32_e32 v12, v14, v13
@@ -13668,7 +13668,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB12_13
; CI-NEXT: ; %bb.14: ; %Flow121
; CI-NEXT: v_mov_b32_e32 v12, v14
-; CI-NEXT: .LBB12_15: ; %frem.loop_exit24
+; CI-NEXT: .LBB12_15: ; %frem.loop_exit55
; CI-NEXT: v_add_i32_e32 v11, vcc, -11, v11
; CI-NEXT: v_ldexp_f32_e32 v11, v12, v11
; CI-NEXT: v_mul_f32_e32 v12, v11, v13
@@ -13684,7 +13684,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v2|, |v6|
; CI-NEXT: s_and_b64 vcc, exec, s[2:3]
; CI-NEXT: s_cbranch_vccz .LBB12_18
-; CI-NEXT: ; %bb.17: ; %frem.else47
+; CI-NEXT: ; %bb.17: ; %frem.else16
; CI-NEXT: s_brev_b32 s2, -2
; CI-NEXT: v_bfi_b32 v10, s2, 0, v2
; CI-NEXT: v_cmp_eq_f32_e64 vcc, |v2|, |v6|
@@ -13693,7 +13693,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB12_24
; CI-NEXT: .LBB12_18:
; CI-NEXT: ; implicit-def: $vgpr10
-; CI-NEXT: .LBB12_19: ; %frem.compute46
+; CI-NEXT: .LBB12_19: ; %frem.compute15
; CI-NEXT: v_frexp_mant_f32_e64 v11, |v6|
; CI-NEXT: v_ldexp_f32_e64 v11, v11, 1
; CI-NEXT: v_div_scale_f32 v17, s[2:3], v11, v11, 1.0
@@ -13718,10 +13718,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v12
; CI-NEXT: v_div_fixup_f32 v14, v14, v11, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB12_23
-; CI-NEXT: ; %bb.20: ; %frem.loop_body54.preheader
+; CI-NEXT: ; %bb.20: ; %frem.loop_body23.preheader
; CI-NEXT: v_sub_i32_e32 v12, vcc, v15, v16
; CI-NEXT: v_add_i32_e32 v12, vcc, 12, v12
-; CI-NEXT: .LBB12_21: ; %frem.loop_body54
+; CI-NEXT: .LBB12_21: ; %frem.loop_body23
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v15, v13
; CI-NEXT: v_mul_f32_e32 v13, v15, v14
@@ -13736,7 +13736,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB12_21
; CI-NEXT: ; %bb.22: ; %Flow117
; CI-NEXT: v_mov_b32_e32 v13, v15
-; CI-NEXT: .LBB12_23: ; %frem.loop_exit55
+; CI-NEXT: .LBB12_23: ; %frem.loop_exit24
; CI-NEXT: v_add_i32_e32 v12, vcc, -11, v12
; CI-NEXT: v_ldexp_f32_e32 v12, v13, v12
; CI-NEXT: v_mul_f32_e32 v13, v12, v14
@@ -13752,7 +13752,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v3|, |v7|
; CI-NEXT: s_and_b64 vcc, exec, s[2:3]
; CI-NEXT: s_cbranch_vccz .LBB12_26
-; CI-NEXT: ; %bb.25: ; %frem.else78
+; CI-NEXT: ; %bb.25: ; %frem.else
; CI-NEXT: s_brev_b32 s2, -2
; CI-NEXT: v_bfi_b32 v11, s2, 0, v3
; CI-NEXT: v_cmp_eq_f32_e64 vcc, |v3|, |v7|
@@ -13761,7 +13761,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_branch .LBB12_32
; CI-NEXT: .LBB12_26:
; CI-NEXT: ; implicit-def: $vgpr11
-; CI-NEXT: .LBB12_27: ; %frem.compute77
+; CI-NEXT: .LBB12_27: ; %frem.compute
; CI-NEXT: v_frexp_mant_f32_e64 v12, |v7|
; CI-NEXT: v_ldexp_f32_e64 v12, v12, 1
; CI-NEXT: v_div_scale_f32 v18, s[2:3], v12, v12, 1.0
@@ -13786,10 +13786,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v13
; CI-NEXT: v_div_fixup_f32 v15, v15, v12, 1.0
; CI-NEXT: s_cbranch_vccnz .LBB12_31
-; CI-NEXT: ; %bb.28: ; %frem.loop_body85.preheader
+; CI-NEXT: ; %bb.28: ; %frem.loop_body.preheader
; CI-NEXT: v_sub_i32_e32 v13, vcc, v16, v17
; CI-NEXT: v_add_i32_e32 v13, vcc, 12, v13
-; CI-NEXT: .LBB12_29: ; %frem.loop_body85
+; CI-NEXT: .LBB12_29: ; %frem.loop_body
; CI-NEXT: ; =>This Inner Loop Header: Depth=1
; CI-NEXT: v_mov_b32_e32 v16, v14
; CI-NEXT: v_mul_f32_e32 v14, v16, v15
@@ -13804,7 +13804,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; CI-NEXT: s_cbranch_vccnz .LBB12_29
; CI-NEXT: ; %bb.30: ; %Flow
; CI-NEXT: v_mov_b32_e32 v14, v16
-; CI-NEXT: .LBB12_31: ; %frem.loop_exit86
+; CI-NEXT: .LBB12_31: ; %frem.loop_exit
; CI-NEXT: v_add_i32_e32 v13, vcc, -11, v13
; CI-NEXT: v_ldexp_f32_e32 v13, v14, v13
; CI-NEXT: v_mul_f32_e32 v14, v13, v15
@@ -13857,7 +13857,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v4|
; VI-NEXT: s_and_b64 vcc, exec, s[2:3]
; VI-NEXT: s_cbranch_vccz .LBB12_2
-; VI-NEXT: ; %bb.1: ; %frem.else
+; VI-NEXT: ; %bb.1: ; %frem.else78
; VI-NEXT: s_brev_b32 s2, -2
; VI-NEXT: v_bfi_b32 v8, s2, 0, v0
; VI-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v4|
@@ -13866,7 +13866,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB12_8
; VI-NEXT: .LBB12_2:
; VI-NEXT: ; implicit-def: $vgpr8
-; VI-NEXT: .LBB12_3: ; %frem.compute
+; VI-NEXT: .LBB12_3: ; %frem.compute77
; VI-NEXT: v_frexp_mant_f32_e64 v9, |v4|
; VI-NEXT: v_ldexp_f32 v9, v9, 1
; VI-NEXT: v_div_scale_f32 v15, s[2:3], v9, v9, 1.0
@@ -13891,10 +13891,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v10
; VI-NEXT: v_div_fixup_f32 v12, v12, v9, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB12_7
-; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader
+; VI-NEXT: ; %bb.4: ; %frem.loop_body85.preheader
; VI-NEXT: v_sub_u32_e32 v10, vcc, v13, v14
; VI-NEXT: v_add_u32_e32 v10, vcc, 12, v10
-; VI-NEXT: .LBB12_5: ; %frem.loop_body
+; VI-NEXT: .LBB12_5: ; %frem.loop_body85
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v13, v11
; VI-NEXT: v_mul_f32_e32 v11, v13, v12
@@ -13909,7 +13909,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB12_5
; VI-NEXT: ; %bb.6: ; %Flow125
; VI-NEXT: v_mov_b32_e32 v11, v13
-; VI-NEXT: .LBB12_7: ; %frem.loop_exit
+; VI-NEXT: .LBB12_7: ; %frem.loop_exit86
; VI-NEXT: v_add_u32_e32 v10, vcc, -11, v10
; VI-NEXT: v_ldexp_f32 v10, v11, v10
; VI-NEXT: v_mul_f32_e32 v11, v10, v12
@@ -13925,7 +13925,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v1|, |v5|
; VI-NEXT: s_and_b64 vcc, exec, s[2:3]
; VI-NEXT: s_cbranch_vccz .LBB12_10
-; VI-NEXT: ; %bb.9: ; %frem.else16
+; VI-NEXT: ; %bb.9: ; %frem.else47
; VI-NEXT: s_brev_b32 s2, -2
; VI-NEXT: v_bfi_b32 v9, s2, 0, v1
; VI-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v5|
@@ -13934,7 +13934,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB12_16
; VI-NEXT: .LBB12_10:
; VI-NEXT: ; implicit-def: $vgpr9
-; VI-NEXT: .LBB12_11: ; %frem.compute15
+; VI-NEXT: .LBB12_11: ; %frem.compute46
; VI-NEXT: v_frexp_mant_f32_e64 v10, |v5|
; VI-NEXT: v_ldexp_f32 v10, v10, 1
; VI-NEXT: v_div_scale_f32 v16, s[2:3], v10, v10, 1.0
@@ -13959,10 +13959,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v11
; VI-NEXT: v_div_fixup_f32 v13, v13, v10, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB12_15
-; VI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader
+; VI-NEXT: ; %bb.12: ; %frem.loop_body54.preheader
; VI-NEXT: v_sub_u32_e32 v11, vcc, v14, v15
; VI-NEXT: v_add_u32_e32 v11, vcc, 12, v11
-; VI-NEXT: .LBB12_13: ; %frem.loop_body23
+; VI-NEXT: .LBB12_13: ; %frem.loop_body54
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v14, v12
; VI-NEXT: v_mul_f32_e32 v12, v14, v13
@@ -13977,7 +13977,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB12_13
; VI-NEXT: ; %bb.14: ; %Flow121
; VI-NEXT: v_mov_b32_e32 v12, v14
-; VI-NEXT: .LBB12_15: ; %frem.loop_exit24
+; VI-NEXT: .LBB12_15: ; %frem.loop_exit55
; VI-NEXT: v_add_u32_e32 v11, vcc, -11, v11
; VI-NEXT: v_ldexp_f32 v11, v12, v11
; VI-NEXT: v_mul_f32_e32 v12, v11, v13
@@ -13993,7 +13993,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v2|, |v6|
; VI-NEXT: s_and_b64 vcc, exec, s[2:3]
; VI-NEXT: s_cbranch_vccz .LBB12_18
-; VI-NEXT: ; %bb.17: ; %frem.else47
+; VI-NEXT: ; %bb.17: ; %frem.else16
; VI-NEXT: s_brev_b32 s2, -2
; VI-NEXT: v_bfi_b32 v10, s2, 0, v2
; VI-NEXT: v_cmp_eq_f32_e64 vcc, |v2|, |v6|
@@ -14002,7 +14002,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_branch .LBB12_24
; VI-NEXT: .LBB12_18:
; VI-NEXT: ; implicit-def: $vgpr10
-; VI-NEXT: .LBB12_19: ; %frem.compute46
+; VI-NEXT: .LBB12_19: ; %frem.compute15
; VI-NEXT: v_frexp_mant_f32_e64 v11, |v6|
; VI-NEXT: v_ldexp_f32 v11, v11, 1
; VI-NEXT: v_div_scale_f32 v17, s[2:3], v11, v11, 1.0
@@ -14027,10 +14027,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v12
; VI-NEXT: v_div_fixup_f32 v14, v14, v11, 1.0
; VI-NEXT: s_cbranch_vccnz .LBB12_23
-; VI-NEXT: ; %bb.20: ; %frem.loop_body54.preheader
+; VI-NEXT: ; %bb.20: ; %frem.loop_body23.preheader
; VI-NEXT: v_sub_u32_e32 v12, vcc, v15, v16
; VI-NEXT: v_add_u32_e32 v12, vcc, 12, v12
-; VI-NEXT: .LBB12_21: ; %frem.loop_body54
+; VI-NEXT: .LBB12_21: ; %frem.loop_body23
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v15, v13
; VI-NEXT: v_mul_f32_e32 v13, v15, v14
@@ -14045,7 +14045,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: s_cbranch_vccnz .LBB12_21
; VI-NEXT: ; %bb.22: ; %Flow117
; VI-NEXT: v_mov_b32_e32 v13, v15
-; VI-NEXT: .LBB12_23: ; %frem.loop_exit55
+; VI-NEXT: .LBB12_23: ; %frem.loop_exit24
; VI-NEXT: v_add_u32_e32 v12, vcc, -11, v12
; VI-NEXT: v_ldexp_f32 v12, v13, v12
; VI-NEXT: v_mul_f32_e32 v13, v12, v14
@@ -14061,7 +14061,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i
; VI-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v3|, |v7|
; VI-NEXT: s_and_b64 vcc, exec, s[2:3]
; VI-NEXT: s_cbranch_vccz .LBB12_26
-; VI-NEXT: ; %bb.25: ; %frem.else78
+; VI-NEXT: ; %bb.25: ; %frem.else
; VI-NEXT: s_brev_b32 s2, -2 ; VI-NEXT: v_bfi_b32 v11, s2, 0, v3 ; VI-NEXT: v_cmp_eq_f32_e64 vcc, |v3|, |v7| @@ -14070,7 +14070,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB12_32 ; VI-NEXT: .LBB12_26: ; VI-NEXT: ; implicit-def: $vgpr11 -; VI-NEXT: .LBB12_27: ; %frem.compute77 +; VI-NEXT: .LBB12_27: ; %frem.compute ; VI-NEXT: v_frexp_mant_f32_e64 v12, |v7| ; VI-NEXT: v_ldexp_f32 v12, v12, 1 ; VI-NEXT: v_div_scale_f32 v18, s[2:3], v12, v12, 1.0 @@ -14095,10 +14095,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_gt_i32_e32 vcc, 13, v13 ; VI-NEXT: v_div_fixup_f32 v15, v15, v12, 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB12_31 -; VI-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; VI-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; VI-NEXT: v_sub_u32_e32 v13, vcc, v16, v17 ; VI-NEXT: v_add_u32_e32 v13, vcc, 12, v13 -; VI-NEXT: .LBB12_29: ; %frem.loop_body85 +; VI-NEXT: .LBB12_29: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v16, v14 ; VI-NEXT: v_mul_f32_e32 v14, v16, v15 @@ -14113,7 +14113,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_cbranch_vccnz .LBB12_29 ; VI-NEXT: ; %bb.30: ; %Flow ; VI-NEXT: v_mov_b32_e32 v14, v16 -; VI-NEXT: .LBB12_31: ; %frem.loop_exit86 +; VI-NEXT: .LBB12_31: ; %frem.loop_exit ; VI-NEXT: v_add_u32_e32 v13, vcc, -11, v13 ; VI-NEXT: v_ldexp_f32 v13, v14, v13 ; VI-NEXT: v_mul_f32_e32 v14, v13, v15 @@ -14161,7 +14161,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v0|, |v4| ; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3] ; GFX9-NEXT: s_cbranch_vccz .LBB12_2 -; GFX9-NEXT: ; %bb.1: ; %frem.else +; GFX9-NEXT: ; %bb.1: ; %frem.else78 ; GFX9-NEXT: s_brev_b32 s2, -2 ; GFX9-NEXT: v_bfi_b32 v8, s2, 0, v0 ; GFX9-NEXT: v_cmp_eq_f32_e64 vcc, |v0|, |v4| @@ -14170,7 +14170,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB12_8 ; GFX9-NEXT: .LBB12_2: ; GFX9-NEXT: ; implicit-def: $vgpr8 -; GFX9-NEXT: .LBB12_3: ; %frem.compute +; GFX9-NEXT: .LBB12_3: ; %frem.compute77 ; GFX9-NEXT: v_frexp_mant_f32_e64 v9, |v4| ; GFX9-NEXT: v_ldexp_f32 v9, v9, 1 ; GFX9-NEXT: v_div_scale_f32 v15, s[2:3], v9, v9, 1.0 @@ -14195,10 +14195,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 13, v10 ; GFX9-NEXT: v_div_fixup_f32 v12, v12, v9, 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB12_7 -; GFX9-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX9-NEXT: ; %bb.4: ; %frem.loop_body85.preheader ; GFX9-NEXT: v_sub_u32_e32 v10, v13, v14 ; GFX9-NEXT: v_add_u32_e32 v10, 12, v10 -; GFX9-NEXT: .LBB12_5: ; %frem.loop_body +; GFX9-NEXT: .LBB12_5: ; %frem.loop_body85 ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v13, v11 ; GFX9-NEXT: v_mul_f32_e32 v11, v13, v12 @@ -14213,7 +14213,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_cbranch_vccnz .LBB12_5 ; GFX9-NEXT: ; %bb.6: ; %Flow125 ; GFX9-NEXT: v_mov_b32_e32 v11, v13 -; GFX9-NEXT: .LBB12_7: ; %frem.loop_exit +; GFX9-NEXT: .LBB12_7: ; %frem.loop_exit86 ; GFX9-NEXT: v_add_u32_e32 v10, -11, v10 ; GFX9-NEXT: v_ldexp_f32 v10, v11, v10 ; GFX9-NEXT: v_mul_f32_e32 v11, v10, v12 @@ -14229,7 +14229,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: 
v_cmp_ngt_f32_e64 s[2:3], |v1|, |v5| ; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3] ; GFX9-NEXT: s_cbranch_vccz .LBB12_10 -; GFX9-NEXT: ; %bb.9: ; %frem.else16 +; GFX9-NEXT: ; %bb.9: ; %frem.else47 ; GFX9-NEXT: s_brev_b32 s2, -2 ; GFX9-NEXT: v_bfi_b32 v9, s2, 0, v1 ; GFX9-NEXT: v_cmp_eq_f32_e64 vcc, |v1|, |v5| @@ -14238,7 +14238,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB12_16 ; GFX9-NEXT: .LBB12_10: ; GFX9-NEXT: ; implicit-def: $vgpr9 -; GFX9-NEXT: .LBB12_11: ; %frem.compute15 +; GFX9-NEXT: .LBB12_11: ; %frem.compute46 ; GFX9-NEXT: v_frexp_mant_f32_e64 v10, |v5| ; GFX9-NEXT: v_ldexp_f32 v10, v10, 1 ; GFX9-NEXT: v_div_scale_f32 v16, s[2:3], v10, v10, 1.0 @@ -14263,10 +14263,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 13, v11 ; GFX9-NEXT: v_div_fixup_f32 v13, v13, v10, 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB12_15 -; GFX9-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX9-NEXT: ; %bb.12: ; %frem.loop_body54.preheader ; GFX9-NEXT: v_sub_u32_e32 v11, v14, v15 ; GFX9-NEXT: v_add_u32_e32 v11, 12, v11 -; GFX9-NEXT: .LBB12_13: ; %frem.loop_body23 +; GFX9-NEXT: .LBB12_13: ; %frem.loop_body54 ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v14, v12 ; GFX9-NEXT: v_mul_f32_e32 v12, v14, v13 @@ -14281,7 +14281,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_cbranch_vccnz .LBB12_13 ; GFX9-NEXT: ; %bb.14: ; %Flow121 ; GFX9-NEXT: v_mov_b32_e32 v12, v14 -; GFX9-NEXT: .LBB12_15: ; %frem.loop_exit24 +; GFX9-NEXT: .LBB12_15: ; %frem.loop_exit55 ; GFX9-NEXT: v_add_u32_e32 v11, -11, v11 ; GFX9-NEXT: v_ldexp_f32 v11, v12, v11 ; GFX9-NEXT: v_mul_f32_e32 v12, v11, v13 @@ -14297,7 +14297,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v2|, |v6| ; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3] ; GFX9-NEXT: s_cbranch_vccz .LBB12_18 -; GFX9-NEXT: ; %bb.17: ; %frem.else47 +; GFX9-NEXT: ; %bb.17: ; %frem.else16 ; GFX9-NEXT: s_brev_b32 s2, -2 ; GFX9-NEXT: v_bfi_b32 v10, s2, 0, v2 ; GFX9-NEXT: v_cmp_eq_f32_e64 vcc, |v2|, |v6| @@ -14306,7 +14306,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB12_24 ; GFX9-NEXT: .LBB12_18: ; GFX9-NEXT: ; implicit-def: $vgpr10 -; GFX9-NEXT: .LBB12_19: ; %frem.compute46 +; GFX9-NEXT: .LBB12_19: ; %frem.compute15 ; GFX9-NEXT: v_frexp_mant_f32_e64 v11, |v6| ; GFX9-NEXT: v_ldexp_f32 v11, v11, 1 ; GFX9-NEXT: v_div_scale_f32 v17, s[2:3], v11, v11, 1.0 @@ -14331,10 +14331,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 13, v12 ; GFX9-NEXT: v_div_fixup_f32 v14, v14, v11, 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB12_23 -; GFX9-NEXT: ; %bb.20: ; %frem.loop_body54.preheader +; GFX9-NEXT: ; %bb.20: ; %frem.loop_body23.preheader ; GFX9-NEXT: v_sub_u32_e32 v12, v15, v16 ; GFX9-NEXT: v_add_u32_e32 v12, 12, v12 -; GFX9-NEXT: .LBB12_21: ; %frem.loop_body54 +; GFX9-NEXT: .LBB12_21: ; %frem.loop_body23 ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v15, v13 ; GFX9-NEXT: v_mul_f32_e32 v13, v15, v14 @@ -14349,7 +14349,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_cbranch_vccnz .LBB12_21 ; GFX9-NEXT: ; %bb.22: ; %Flow117 ; GFX9-NEXT: v_mov_b32_e32 v13, v15 -; GFX9-NEXT: .LBB12_23: ; %frem.loop_exit55 +; 
GFX9-NEXT: .LBB12_23: ; %frem.loop_exit24 ; GFX9-NEXT: v_add_u32_e32 v12, -11, v12 ; GFX9-NEXT: v_ldexp_f32 v12, v13, v12 ; GFX9-NEXT: v_mul_f32_e32 v13, v12, v14 @@ -14365,7 +14365,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_ngt_f32_e64 s[2:3], |v3|, |v7| ; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3] ; GFX9-NEXT: s_cbranch_vccz .LBB12_26 -; GFX9-NEXT: ; %bb.25: ; %frem.else78 +; GFX9-NEXT: ; %bb.25: ; %frem.else ; GFX9-NEXT: s_brev_b32 s2, -2 ; GFX9-NEXT: v_bfi_b32 v11, s2, 0, v3 ; GFX9-NEXT: v_cmp_eq_f32_e64 vcc, |v3|, |v7| @@ -14374,7 +14374,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB12_32 ; GFX9-NEXT: .LBB12_26: ; GFX9-NEXT: ; implicit-def: $vgpr11 -; GFX9-NEXT: .LBB12_27: ; %frem.compute77 +; GFX9-NEXT: .LBB12_27: ; %frem.compute ; GFX9-NEXT: v_frexp_mant_f32_e64 v12, |v7| ; GFX9-NEXT: v_ldexp_f32 v12, v12, 1 ; GFX9-NEXT: v_div_scale_f32 v18, s[2:3], v12, v12, 1.0 @@ -14399,10 +14399,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 13, v13 ; GFX9-NEXT: v_div_fixup_f32 v15, v15, v12, 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB12_31 -; GFX9-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; GFX9-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; GFX9-NEXT: v_sub_u32_e32 v13, v16, v17 ; GFX9-NEXT: v_add_u32_e32 v13, 12, v13 -; GFX9-NEXT: .LBB12_29: ; %frem.loop_body85 +; GFX9-NEXT: .LBB12_29: ; %frem.loop_body ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v16, v14 ; GFX9-NEXT: v_mul_f32_e32 v14, v16, v15 @@ -14417,7 +14417,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_cbranch_vccnz .LBB12_29 ; GFX9-NEXT: ; %bb.30: ; %Flow ; GFX9-NEXT: v_mov_b32_e32 v14, v16 -; GFX9-NEXT: .LBB12_31: ; %frem.loop_exit86 +; GFX9-NEXT: .LBB12_31: ; %frem.loop_exit ; GFX9-NEXT: v_add_u32_e32 v13, -11, v13 ; GFX9-NEXT: v_ldexp_f32 v13, v14, v13 ; GFX9-NEXT: v_mul_f32_e32 v14, v13, v15 @@ -14466,7 +14466,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_ngt_f32_e64 s2, |v0|, |v4| ; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX10-NEXT: s_cbranch_vccz .LBB12_2 -; GFX10-NEXT: ; %bb.1: ; %frem.else +; GFX10-NEXT: ; %bb.1: ; %frem.else78 ; GFX10-NEXT: v_bfi_b32 v8, 0x7fffffff, 0, v0 ; GFX10-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v0|, |v4| ; GFX10-NEXT: v_cndmask_b32_e32 v8, v0, v8, vcc_lo @@ -14474,7 +14474,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB12_8 ; GFX10-NEXT: .LBB12_2: ; GFX10-NEXT: ; implicit-def: $vgpr8 -; GFX10-NEXT: .LBB12_3: ; %frem.compute +; GFX10-NEXT: .LBB12_3: ; %frem.compute77 ; GFX10-NEXT: v_frexp_mant_f32_e64 v9, |v4| ; GFX10-NEXT: v_frexp_mant_f32_e64 v8, |v0| ; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v11, v0 @@ -14501,10 +14501,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v12 ; GFX10-NEXT: v_div_fixup_f32 v11, v11, v9, 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB12_7 -; GFX10-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX10-NEXT: ; %bb.4: ; %frem.loop_body85.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 12 -; GFX10-NEXT: .LBB12_5: ; %frem.loop_body +; GFX10-NEXT: .LBB12_5: ; %frem.loop_body85 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v13, v10 ; GFX10-NEXT: s_add_i32 
s2, s2, -12 @@ -14520,7 +14520,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: ; %bb.6: ; %Flow125 ; GFX10-NEXT: v_mov_b32_e32 v12, s2 ; GFX10-NEXT: v_mov_b32_e32 v10, v13 -; GFX10-NEXT: .LBB12_7: ; %frem.loop_exit +; GFX10-NEXT: .LBB12_7: ; %frem.loop_exit86 ; GFX10-NEXT: v_add_nc_u32_e32 v12, -11, v12 ; GFX10-NEXT: v_ldexp_f32 v10, v10, v12 ; GFX10-NEXT: v_mul_f32_e32 v11, v10, v11 @@ -14535,7 +14535,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_ngt_f32_e64 s2, |v1|, |v5| ; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX10-NEXT: s_cbranch_vccz .LBB12_10 -; GFX10-NEXT: ; %bb.9: ; %frem.else16 +; GFX10-NEXT: ; %bb.9: ; %frem.else47 ; GFX10-NEXT: v_bfi_b32 v9, 0x7fffffff, 0, v1 ; GFX10-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v1|, |v5| ; GFX10-NEXT: v_cndmask_b32_e32 v9, v1, v9, vcc_lo @@ -14543,7 +14543,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB12_16 ; GFX10-NEXT: .LBB12_10: ; GFX10-NEXT: ; implicit-def: $vgpr9 -; GFX10-NEXT: .LBB12_11: ; %frem.compute15 +; GFX10-NEXT: .LBB12_11: ; %frem.compute46 ; GFX10-NEXT: v_frexp_mant_f32_e64 v10, |v5| ; GFX10-NEXT: v_frexp_mant_f32_e64 v9, |v1| ; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v12, v1 @@ -14570,10 +14570,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v13 ; GFX10-NEXT: v_div_fixup_f32 v12, v12, v10, 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB12_15 -; GFX10-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX10-NEXT: ; %bb.12: ; %frem.loop_body54.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 12 -; GFX10-NEXT: .LBB12_13: ; %frem.loop_body23 +; GFX10-NEXT: .LBB12_13: ; %frem.loop_body54 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v14, v11 ; GFX10-NEXT: s_add_i32 s2, s2, -12 @@ -14589,7 +14589,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: ; %bb.14: ; %Flow121 ; GFX10-NEXT: v_mov_b32_e32 v13, s2 ; GFX10-NEXT: v_mov_b32_e32 v11, v14 -; GFX10-NEXT: .LBB12_15: ; %frem.loop_exit24 +; GFX10-NEXT: .LBB12_15: ; %frem.loop_exit55 ; GFX10-NEXT: v_add_nc_u32_e32 v13, -11, v13 ; GFX10-NEXT: v_ldexp_f32 v11, v11, v13 ; GFX10-NEXT: v_mul_f32_e32 v12, v11, v12 @@ -14604,7 +14604,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_ngt_f32_e64 s2, |v2|, |v6| ; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX10-NEXT: s_cbranch_vccz .LBB12_18 -; GFX10-NEXT: ; %bb.17: ; %frem.else47 +; GFX10-NEXT: ; %bb.17: ; %frem.else16 ; GFX10-NEXT: v_bfi_b32 v10, 0x7fffffff, 0, v2 ; GFX10-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v2|, |v6| ; GFX10-NEXT: v_cndmask_b32_e32 v10, v2, v10, vcc_lo @@ -14612,7 +14612,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB12_24 ; GFX10-NEXT: .LBB12_18: ; GFX10-NEXT: ; implicit-def: $vgpr10 -; GFX10-NEXT: .LBB12_19: ; %frem.compute46 +; GFX10-NEXT: .LBB12_19: ; %frem.compute15 ; GFX10-NEXT: v_frexp_mant_f32_e64 v11, |v6| ; GFX10-NEXT: v_frexp_mant_f32_e64 v10, |v2| ; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v13, v2 @@ -14639,10 +14639,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v14 ; GFX10-NEXT: v_div_fixup_f32 v13, v13, v11, 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB12_23 -; GFX10-NEXT: ; %bb.20: ; 
%frem.loop_body54.preheader +; GFX10-NEXT: ; %bb.20: ; %frem.loop_body23.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 12 -; GFX10-NEXT: .LBB12_21: ; %frem.loop_body54 +; GFX10-NEXT: .LBB12_21: ; %frem.loop_body23 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v15, v12 ; GFX10-NEXT: s_add_i32 s2, s2, -12 @@ -14658,7 +14658,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: ; %bb.22: ; %Flow117 ; GFX10-NEXT: v_mov_b32_e32 v14, s2 ; GFX10-NEXT: v_mov_b32_e32 v12, v15 -; GFX10-NEXT: .LBB12_23: ; %frem.loop_exit55 +; GFX10-NEXT: .LBB12_23: ; %frem.loop_exit24 ; GFX10-NEXT: v_add_nc_u32_e32 v14, -11, v14 ; GFX10-NEXT: v_ldexp_f32 v12, v12, v14 ; GFX10-NEXT: v_mul_f32_e32 v13, v12, v13 @@ -14673,7 +14673,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_ngt_f32_e64 s2, |v3|, |v7| ; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX10-NEXT: s_cbranch_vccz .LBB12_26 -; GFX10-NEXT: ; %bb.25: ; %frem.else78 +; GFX10-NEXT: ; %bb.25: ; %frem.else ; GFX10-NEXT: v_bfi_b32 v11, 0x7fffffff, 0, v3 ; GFX10-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v3|, |v7| ; GFX10-NEXT: v_cndmask_b32_e32 v11, v3, v11, vcc_lo @@ -14681,7 +14681,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB12_32 ; GFX10-NEXT: .LBB12_26: ; GFX10-NEXT: ; implicit-def: $vgpr11 -; GFX10-NEXT: .LBB12_27: ; %frem.compute77 +; GFX10-NEXT: .LBB12_27: ; %frem.compute ; GFX10-NEXT: v_frexp_mant_f32_e64 v12, |v7| ; GFX10-NEXT: v_frexp_mant_f32_e64 v11, |v3| ; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v14, v3 @@ -14708,10 +14708,10 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v15 ; GFX10-NEXT: v_div_fixup_f32 v14, v14, v12, 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB12_31 -; GFX10-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; GFX10-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 12 -; GFX10-NEXT: .LBB12_29: ; %frem.loop_body85 +; GFX10-NEXT: .LBB12_29: ; %frem.loop_body ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v16, v13 ; GFX10-NEXT: s_add_i32 s2, s2, -12 @@ -14727,7 +14727,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: ; %bb.30: ; %Flow ; GFX10-NEXT: v_mov_b32_e32 v15, s2 ; GFX10-NEXT: v_mov_b32_e32 v13, v16 -; GFX10-NEXT: .LBB12_31: ; %frem.loop_exit86 +; GFX10-NEXT: .LBB12_31: ; %frem.loop_exit ; GFX10-NEXT: v_add_nc_u32_e32 v15, -11, v15 ; GFX10-NEXT: v_ldexp_f32 v13, v13, v15 ; GFX10-NEXT: v_mul_f32_e32 v14, v13, v14 @@ -14773,7 +14773,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: v_cmp_ngt_f32_e64 s2, |v0|, |v4| ; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX11-NEXT: s_cbranch_vccz .LBB12_2 -; GFX11-NEXT: ; %bb.1: ; %frem.else +; GFX11-NEXT: ; %bb.1: ; %frem.else78 ; GFX11-NEXT: v_bfi_b32 v8, 0x7fffffff, 0, v0 ; GFX11-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v0|, |v4| ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) @@ -14782,7 +14782,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_branch .LBB12_8 ; GFX11-NEXT: .LBB12_2: ; GFX11-NEXT: ; implicit-def: $vgpr8 -; GFX11-NEXT: .LBB12_3: ; %frem.compute +; GFX11-NEXT: .LBB12_3: ; %frem.compute77 ; GFX11-NEXT: v_frexp_mant_f32_e64 v9, |v4| ; GFX11-NEXT: 
v_frexp_mant_f32_e64 v8, |v0| ; GFX11-NEXT: v_frexp_exp_i32_f32_e32 v11, v0 @@ -14818,11 +14818,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_div_fixup_f32 v11, v11, v9, 1.0 ; GFX11-NEXT: s_cbranch_vccnz .LBB12_7 -; GFX11-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX11-NEXT: ; %bb.4: ; %frem.loop_body85.preheader ; GFX11-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_add_i32 s2, s2, 12 -; GFX11-NEXT: .LBB12_5: ; %frem.loop_body +; GFX11-NEXT: .LBB12_5: ; %frem.loop_body85 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_mov_b32_e32 v13, v10 @@ -14842,7 +14842,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: ; %bb.6: ; %Flow125 ; GFX11-NEXT: v_mov_b32_e32 v12, s2 ; GFX11-NEXT: v_mov_b32_e32 v10, v13 -; GFX11-NEXT: .LBB12_7: ; %frem.loop_exit +; GFX11-NEXT: .LBB12_7: ; %frem.loop_exit86 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_nc_u32_e32 v12, -11, v12 ; GFX11-NEXT: v_ldexp_f32 v10, v10, v12 @@ -14862,7 +14862,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: v_cmp_ngt_f32_e64 s2, |v1|, |v5| ; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX11-NEXT: s_cbranch_vccz .LBB12_10 -; GFX11-NEXT: ; %bb.9: ; %frem.else16 +; GFX11-NEXT: ; %bb.9: ; %frem.else47 ; GFX11-NEXT: v_bfi_b32 v9, 0x7fffffff, 0, v1 ; GFX11-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v1|, |v5| ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) @@ -14871,7 +14871,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_branch .LBB12_16 ; GFX11-NEXT: .LBB12_10: ; GFX11-NEXT: ; implicit-def: $vgpr9 -; GFX11-NEXT: .LBB12_11: ; %frem.compute15 +; GFX11-NEXT: .LBB12_11: ; %frem.compute46 ; GFX11-NEXT: v_frexp_mant_f32_e64 v10, |v5| ; GFX11-NEXT: v_frexp_mant_f32_e64 v9, |v1| ; GFX11-NEXT: v_frexp_exp_i32_f32_e32 v12, v1 @@ -14907,11 +14907,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_div_fixup_f32 v12, v12, v10, 1.0 ; GFX11-NEXT: s_cbranch_vccnz .LBB12_15 -; GFX11-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX11-NEXT: ; %bb.12: ; %frem.loop_body54.preheader ; GFX11-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_add_i32 s2, s2, 12 -; GFX11-NEXT: .LBB12_13: ; %frem.loop_body23 +; GFX11-NEXT: .LBB12_13: ; %frem.loop_body54 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_mov_b32_e32 v14, v11 @@ -14931,7 +14931,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: ; %bb.14: ; %Flow121 ; GFX11-NEXT: v_mov_b32_e32 v13, s2 ; GFX11-NEXT: v_mov_b32_e32 v11, v14 -; GFX11-NEXT: .LBB12_15: ; %frem.loop_exit24 +; GFX11-NEXT: .LBB12_15: ; %frem.loop_exit55 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_nc_u32_e32 v13, -11, v13 ; GFX11-NEXT: v_ldexp_f32 v11, v11, v13 @@ -14951,7 +14951,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: v_cmp_ngt_f32_e64 s2, |v2|, |v6| ; GFX11-NEXT: s_and_b32 vcc_lo, 
exec_lo, s2 ; GFX11-NEXT: s_cbranch_vccz .LBB12_18 -; GFX11-NEXT: ; %bb.17: ; %frem.else47 +; GFX11-NEXT: ; %bb.17: ; %frem.else16 ; GFX11-NEXT: v_bfi_b32 v10, 0x7fffffff, 0, v2 ; GFX11-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v2|, |v6| ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) @@ -14960,7 +14960,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_branch .LBB12_24 ; GFX11-NEXT: .LBB12_18: ; GFX11-NEXT: ; implicit-def: $vgpr10 -; GFX11-NEXT: .LBB12_19: ; %frem.compute46 +; GFX11-NEXT: .LBB12_19: ; %frem.compute15 ; GFX11-NEXT: v_frexp_mant_f32_e64 v11, |v6| ; GFX11-NEXT: v_frexp_mant_f32_e64 v10, |v2| ; GFX11-NEXT: v_frexp_exp_i32_f32_e32 v13, v2 @@ -14996,11 +14996,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_div_fixup_f32 v13, v13, v11, 1.0 ; GFX11-NEXT: s_cbranch_vccnz .LBB12_23 -; GFX11-NEXT: ; %bb.20: ; %frem.loop_body54.preheader +; GFX11-NEXT: ; %bb.20: ; %frem.loop_body23.preheader ; GFX11-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_add_i32 s2, s2, 12 -; GFX11-NEXT: .LBB12_21: ; %frem.loop_body54 +; GFX11-NEXT: .LBB12_21: ; %frem.loop_body23 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_mov_b32_e32 v15, v12 @@ -15020,7 +15020,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: ; %bb.22: ; %Flow117 ; GFX11-NEXT: v_mov_b32_e32 v14, s2 ; GFX11-NEXT: v_mov_b32_e32 v12, v15 -; GFX11-NEXT: .LBB12_23: ; %frem.loop_exit55 +; GFX11-NEXT: .LBB12_23: ; %frem.loop_exit24 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_nc_u32_e32 v14, -11, v14 ; GFX11-NEXT: v_ldexp_f32 v12, v12, v14 @@ -15040,7 +15040,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: v_cmp_ngt_f32_e64 s2, |v3|, |v7| ; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX11-NEXT: s_cbranch_vccz .LBB12_26 -; GFX11-NEXT: ; %bb.25: ; %frem.else78 +; GFX11-NEXT: ; %bb.25: ; %frem.else ; GFX11-NEXT: v_bfi_b32 v11, 0x7fffffff, 0, v3 ; GFX11-NEXT: v_cmp_eq_f32_e64 vcc_lo, |v3|, |v7| ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) @@ -15049,7 +15049,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_branch .LBB12_32 ; GFX11-NEXT: .LBB12_26: ; GFX11-NEXT: ; implicit-def: $vgpr11 -; GFX11-NEXT: .LBB12_27: ; %frem.compute77 +; GFX11-NEXT: .LBB12_27: ; %frem.compute ; GFX11-NEXT: v_frexp_mant_f32_e64 v12, |v7| ; GFX11-NEXT: v_frexp_mant_f32_e64 v11, |v3| ; GFX11-NEXT: v_frexp_exp_i32_f32_e32 v14, v3 @@ -15085,11 +15085,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_div_fixup_f32 v14, v14, v12, 1.0 ; GFX11-NEXT: s_cbranch_vccnz .LBB12_31 -; GFX11-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; GFX11-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; GFX11-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_add_i32 s2, s2, 12 -; GFX11-NEXT: .LBB12_29: ; %frem.loop_body85 +; GFX11-NEXT: .LBB12_29: ; %frem.loop_body ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_mov_b32_e32 v16, v13 @@ -15109,7 +15109,7 @@ 
define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: ; %bb.30: ; %Flow ; GFX11-NEXT: v_mov_b32_e32 v15, s2 ; GFX11-NEXT: v_mov_b32_e32 v13, v16 -; GFX11-NEXT: .LBB12_31: ; %frem.loop_exit86 +; GFX11-NEXT: .LBB12_31: ; %frem.loop_exit ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_nc_u32_e32 v15, -11, v15 ; GFX11-NEXT: v_ldexp_f32 v13, v13, v15 @@ -15170,7 +15170,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_cmp_ngt_f32 s5, s12 ; GFX1150-NEXT: s_cbranch_scc0 .LBB12_2 -; GFX1150-NEXT: ; %bb.1: ; %frem.else +; GFX1150-NEXT: ; %bb.1: ; %frem.else78 ; GFX1150-NEXT: s_cmp_eq_f32 s5, s12 ; GFX1150-NEXT: v_bfi_b32 v0, 0x7fffffff, 0, s8 ; GFX1150-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15180,7 +15180,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_branch .LBB12_8 ; GFX1150-NEXT: .LBB12_2: ; GFX1150-NEXT: ; implicit-def: $vgpr0 -; GFX1150-NEXT: .LBB12_3: ; %frem.compute +; GFX1150-NEXT: .LBB12_3: ; %frem.compute77 ; GFX1150-NEXT: v_frexp_mant_f32_e64 v1, |s6| ; GFX1150-NEXT: v_frexp_mant_f32_e64 v0, |s8| ; GFX1150-NEXT: v_frexp_exp_i32_f32_e32 v3, s8 @@ -15215,11 +15215,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v4 ; GFX1150-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; GFX1150-NEXT: s_cbranch_vccnz .LBB12_7 -; GFX1150-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1150-NEXT: ; %bb.4: ; %frem.loop_body85.preheader ; GFX1150-NEXT: s_sub_i32 s11, s11, s12 ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_add_i32 s11, s11, 12 -; GFX1150-NEXT: .LBB12_5: ; %frem.loop_body +; GFX1150-NEXT: .LBB12_5: ; %frem.loop_body85 ; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-NEXT: v_mov_b32_e32 v5, v2 @@ -15241,7 +15241,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: ; %bb.6: ; %Flow125 ; GFX1150-NEXT: v_mov_b32_e32 v4, s11 ; GFX1150-NEXT: v_mov_b32_e32 v2, v5 -; GFX1150-NEXT: .LBB12_7: ; %frem.loop_exit +; GFX1150-NEXT: .LBB12_7: ; %frem.loop_exit86 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-NEXT: v_add_nc_u32_e32 v4, -11, v4 ; GFX1150-NEXT: v_ldexp_f32 v2, v2, v4 @@ -15264,7 +15264,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_cmp_ngt_f32 s8, s12 ; GFX1150-NEXT: s_cbranch_scc0 .LBB12_10 -; GFX1150-NEXT: ; %bb.9: ; %frem.else16 +; GFX1150-NEXT: ; %bb.9: ; %frem.else47 ; GFX1150-NEXT: s_cmp_eq_f32 s8, s12 ; GFX1150-NEXT: v_bfi_b32 v1, 0x7fffffff, 0, s10 ; GFX1150-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15274,7 +15274,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_branch .LBB12_16 ; GFX1150-NEXT: .LBB12_10: ; GFX1150-NEXT: ; implicit-def: $vgpr1 -; GFX1150-NEXT: .LBB12_11: ; %frem.compute15 +; GFX1150-NEXT: .LBB12_11: ; %frem.compute46 ; GFX1150-NEXT: v_frexp_mant_f32_e64 v2, |s4| ; GFX1150-NEXT: v_frexp_mant_f32_e64 v1, |s10| ; GFX1150-NEXT: v_frexp_exp_i32_f32_e32 v4, s10 @@ -15309,11 +15309,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; 
GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v5 ; GFX1150-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; GFX1150-NEXT: s_cbranch_vccnz .LBB12_15 -; GFX1150-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX1150-NEXT: ; %bb.12: ; %frem.loop_body54.preheader ; GFX1150-NEXT: s_sub_i32 s11, s11, s12 ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_add_i32 s11, s11, 12 -; GFX1150-NEXT: .LBB12_13: ; %frem.loop_body23 +; GFX1150-NEXT: .LBB12_13: ; %frem.loop_body54 ; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-NEXT: v_mov_b32_e32 v6, v3 @@ -15335,7 +15335,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: ; %bb.14: ; %Flow121 ; GFX1150-NEXT: v_mov_b32_e32 v5, s11 ; GFX1150-NEXT: v_mov_b32_e32 v3, v6 -; GFX1150-NEXT: .LBB12_15: ; %frem.loop_exit24 +; GFX1150-NEXT: .LBB12_15: ; %frem.loop_exit55 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-NEXT: v_add_nc_u32_e32 v5, -11, v5 ; GFX1150-NEXT: v_ldexp_f32 v3, v3, v5 @@ -15358,7 +15358,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_cmp_ngt_f32 s10, s12 ; GFX1150-NEXT: s_cbranch_scc0 .LBB12_18 -; GFX1150-NEXT: ; %bb.17: ; %frem.else47 +; GFX1150-NEXT: ; %bb.17: ; %frem.else16 ; GFX1150-NEXT: s_cmp_eq_f32 s10, s12 ; GFX1150-NEXT: v_bfi_b32 v2, 0x7fffffff, 0, s9 ; GFX1150-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15368,7 +15368,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_branch .LBB12_24 ; GFX1150-NEXT: .LBB12_18: ; GFX1150-NEXT: ; implicit-def: $vgpr2 -; GFX1150-NEXT: .LBB12_19: ; %frem.compute46 +; GFX1150-NEXT: .LBB12_19: ; %frem.compute15 ; GFX1150-NEXT: v_frexp_mant_f32_e64 v3, |s3| ; GFX1150-NEXT: v_frexp_mant_f32_e64 v2, |s9| ; GFX1150-NEXT: v_frexp_exp_i32_f32_e32 v5, s9 @@ -15403,11 +15403,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v6 ; GFX1150-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; GFX1150-NEXT: s_cbranch_vccnz .LBB12_23 -; GFX1150-NEXT: ; %bb.20: ; %frem.loop_body54.preheader +; GFX1150-NEXT: ; %bb.20: ; %frem.loop_body23.preheader ; GFX1150-NEXT: s_sub_i32 s11, s11, s12 ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_add_i32 s11, s11, 12 -; GFX1150-NEXT: .LBB12_21: ; %frem.loop_body54 +; GFX1150-NEXT: .LBB12_21: ; %frem.loop_body23 ; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-NEXT: v_mov_b32_e32 v7, v4 @@ -15429,7 +15429,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: ; %bb.22: ; %Flow117 ; GFX1150-NEXT: v_mov_b32_e32 v6, s11 ; GFX1150-NEXT: v_mov_b32_e32 v4, v7 -; GFX1150-NEXT: .LBB12_23: ; %frem.loop_exit55 +; GFX1150-NEXT: .LBB12_23: ; %frem.loop_exit24 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-NEXT: v_add_nc_u32_e32 v6, -11, v6 ; GFX1150-NEXT: v_ldexp_f32 v4, v4, v6 @@ -15452,7 +15452,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_cmp_ngt_f32 s9, s12 ; GFX1150-NEXT: s_cbranch_scc0 .LBB12_26 -; GFX1150-NEXT: ; %bb.25: ; %frem.else78 +; 
GFX1150-NEXT: ; %bb.25: ; %frem.else ; GFX1150-NEXT: s_cmp_eq_f32 s9, s12 ; GFX1150-NEXT: v_bfi_b32 v3, 0x7fffffff, 0, s7 ; GFX1150-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15462,7 +15462,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_branch .LBB12_32 ; GFX1150-NEXT: .LBB12_26: ; GFX1150-NEXT: ; implicit-def: $vgpr3 -; GFX1150-NEXT: .LBB12_27: ; %frem.compute77 +; GFX1150-NEXT: .LBB12_27: ; %frem.compute ; GFX1150-NEXT: v_frexp_mant_f32_e64 v4, |s2| ; GFX1150-NEXT: v_frexp_mant_f32_e64 v3, |s7| ; GFX1150-NEXT: v_frexp_exp_i32_f32_e32 v6, s7 @@ -15497,11 +15497,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v7 ; GFX1150-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0 ; GFX1150-NEXT: s_cbranch_vccnz .LBB12_31 -; GFX1150-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; GFX1150-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; GFX1150-NEXT: s_sub_i32 s11, s11, s12 ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_add_i32 s11, s11, 12 -; GFX1150-NEXT: .LBB12_29: ; %frem.loop_body85 +; GFX1150-NEXT: .LBB12_29: ; %frem.loop_body ; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-NEXT: v_mov_b32_e32 v8, v5 @@ -15523,7 +15523,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: ; %bb.30: ; %Flow ; GFX1150-NEXT: v_mov_b32_e32 v7, s11 ; GFX1150-NEXT: v_mov_b32_e32 v5, v8 -; GFX1150-NEXT: .LBB12_31: ; %frem.loop_exit86 +; GFX1150-NEXT: .LBB12_31: ; %frem.loop_exit ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-NEXT: v_add_nc_u32_e32 v7, -11, v7 ; GFX1150-NEXT: v_ldexp_f32 v5, v5, v7 @@ -15597,7 +15597,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1200-NEXT: s_cmp_ngt_f32 s5, s12 ; GFX1200-NEXT: s_cbranch_scc0 .LBB12_2 -; GFX1200-NEXT: ; %bb.1: ; %frem.else +; GFX1200-NEXT: ; %bb.1: ; %frem.else78 ; GFX1200-NEXT: s_cmp_eq_f32 s5, s12 ; GFX1200-NEXT: v_bfi_b32 v0, 0x7fffffff, 0, s8 ; GFX1200-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15607,7 +15607,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_branch .LBB12_8 ; GFX1200-NEXT: .LBB12_2: ; GFX1200-NEXT: ; implicit-def: $vgpr0 -; GFX1200-NEXT: .LBB12_3: ; %frem.compute +; GFX1200-NEXT: .LBB12_3: ; %frem.compute77 ; GFX1200-NEXT: v_frexp_mant_f32_e64 v1, |s6| ; GFX1200-NEXT: v_frexp_mant_f32_e64 v0, |s8| ; GFX1200-NEXT: v_frexp_exp_i32_f32_e32 v3, s8 @@ -15643,11 +15643,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v4 ; GFX1200-NEXT: v_div_fixup_f32 v3, v3, v1, 1.0 ; GFX1200-NEXT: s_cbranch_vccnz .LBB12_7 -; GFX1200-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1200-NEXT: ; %bb.4: ; %frem.loop_body85.preheader ; GFX1200-NEXT: s_sub_co_i32 s11, s11, s12 ; GFX1200-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1200-NEXT: s_add_co_i32 s11, s11, 12 -; GFX1200-NEXT: .LBB12_5: ; %frem.loop_body +; GFX1200-NEXT: .LBB12_5: ; %frem.loop_body85 ; GFX1200-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1200-NEXT: v_mov_b32_e32 v5, v2 @@ -15670,7 +15670,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) 
%out, ptr addrspace(1) %i ; GFX1200-NEXT: ; %bb.6: ; %Flow125 ; GFX1200-NEXT: v_mov_b32_e32 v4, s11 ; GFX1200-NEXT: v_mov_b32_e32 v2, v5 -; GFX1200-NEXT: .LBB12_7: ; %frem.loop_exit +; GFX1200-NEXT: .LBB12_7: ; %frem.loop_exit86 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-NEXT: v_add_nc_u32_e32 v4, -11, v4 ; GFX1200-NEXT: v_ldexp_f32 v2, v2, v4 @@ -15694,7 +15694,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_cmp_ngt_f32 s8, s12 ; GFX1200-NEXT: s_cbranch_scc0 .LBB12_10 -; GFX1200-NEXT: ; %bb.9: ; %frem.else16 +; GFX1200-NEXT: ; %bb.9: ; %frem.else47 ; GFX1200-NEXT: s_cmp_eq_f32 s8, s12 ; GFX1200-NEXT: v_bfi_b32 v1, 0x7fffffff, 0, s10 ; GFX1200-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15705,7 +15705,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_branch .LBB12_16 ; GFX1200-NEXT: .LBB12_10: ; GFX1200-NEXT: ; implicit-def: $vgpr1 -; GFX1200-NEXT: .LBB12_11: ; %frem.compute15 +; GFX1200-NEXT: .LBB12_11: ; %frem.compute46 ; GFX1200-NEXT: v_frexp_mant_f32_e64 v2, |s4| ; GFX1200-NEXT: v_frexp_mant_f32_e64 v1, |s10| ; GFX1200-NEXT: v_frexp_exp_i32_f32_e32 v4, s10 @@ -15741,11 +15741,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v5 ; GFX1200-NEXT: v_div_fixup_f32 v4, v4, v2, 1.0 ; GFX1200-NEXT: s_cbranch_vccnz .LBB12_15 -; GFX1200-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX1200-NEXT: ; %bb.12: ; %frem.loop_body54.preheader ; GFX1200-NEXT: s_sub_co_i32 s11, s11, s12 ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_add_co_i32 s11, s11, 12 -; GFX1200-NEXT: .LBB12_13: ; %frem.loop_body23 +; GFX1200-NEXT: .LBB12_13: ; %frem.loop_body54 ; GFX1200-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1200-NEXT: v_mov_b32_e32 v6, v3 @@ -15769,7 +15769,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: ; %bb.14: ; %Flow121 ; GFX1200-NEXT: v_mov_b32_e32 v5, s11 ; GFX1200-NEXT: v_mov_b32_e32 v3, v6 -; GFX1200-NEXT: .LBB12_15: ; %frem.loop_exit24 +; GFX1200-NEXT: .LBB12_15: ; %frem.loop_exit55 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-NEXT: v_add_nc_u32_e32 v5, -11, v5 ; GFX1200-NEXT: v_ldexp_f32 v3, v3, v5 @@ -15793,7 +15793,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_cmp_ngt_f32 s10, s12 ; GFX1200-NEXT: s_cbranch_scc0 .LBB12_18 -; GFX1200-NEXT: ; %bb.17: ; %frem.else47 +; GFX1200-NEXT: ; %bb.17: ; %frem.else16 ; GFX1200-NEXT: s_cmp_eq_f32 s10, s12 ; GFX1200-NEXT: v_bfi_b32 v2, 0x7fffffff, 0, s9 ; GFX1200-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15804,7 +15804,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_branch .LBB12_24 ; GFX1200-NEXT: .LBB12_18: ; GFX1200-NEXT: ; implicit-def: $vgpr2 -; GFX1200-NEXT: .LBB12_19: ; %frem.compute46 +; GFX1200-NEXT: .LBB12_19: ; %frem.compute15 ; GFX1200-NEXT: v_frexp_mant_f32_e64 v3, |s3| ; GFX1200-NEXT: v_frexp_mant_f32_e64 v2, |s9| ; GFX1200-NEXT: v_frexp_exp_i32_f32_e32 v5, s9 @@ -15840,11 +15840,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v6 ; GFX1200-NEXT: v_div_fixup_f32 v5, v5, v3, 1.0 ; GFX1200-NEXT: 
s_cbranch_vccnz .LBB12_23 -; GFX1200-NEXT: ; %bb.20: ; %frem.loop_body54.preheader +; GFX1200-NEXT: ; %bb.20: ; %frem.loop_body23.preheader ; GFX1200-NEXT: s_sub_co_i32 s11, s11, s12 ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_add_co_i32 s11, s11, 12 -; GFX1200-NEXT: .LBB12_21: ; %frem.loop_body54 +; GFX1200-NEXT: .LBB12_21: ; %frem.loop_body23 ; GFX1200-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1200-NEXT: v_mov_b32_e32 v7, v4 @@ -15868,7 +15868,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: ; %bb.22: ; %Flow117 ; GFX1200-NEXT: v_mov_b32_e32 v6, s11 ; GFX1200-NEXT: v_mov_b32_e32 v4, v7 -; GFX1200-NEXT: .LBB12_23: ; %frem.loop_exit55 +; GFX1200-NEXT: .LBB12_23: ; %frem.loop_exit24 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-NEXT: v_add_nc_u32_e32 v6, -11, v6 ; GFX1200-NEXT: v_ldexp_f32 v4, v4, v6 @@ -15892,7 +15892,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_cmp_ngt_f32 s9, s12 ; GFX1200-NEXT: s_cbranch_scc0 .LBB12_26 -; GFX1200-NEXT: ; %bb.25: ; %frem.else78 +; GFX1200-NEXT: ; %bb.25: ; %frem.else ; GFX1200-NEXT: s_cmp_eq_f32 s9, s12 ; GFX1200-NEXT: v_bfi_b32 v3, 0x7fffffff, 0, s7 ; GFX1200-NEXT: s_cselect_b32 vcc_lo, -1, 0 @@ -15903,7 +15903,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_branch .LBB12_32 ; GFX1200-NEXT: .LBB12_26: ; GFX1200-NEXT: ; implicit-def: $vgpr3 -; GFX1200-NEXT: .LBB12_27: ; %frem.compute77 +; GFX1200-NEXT: .LBB12_27: ; %frem.compute ; GFX1200-NEXT: v_frexp_mant_f32_e64 v4, |s2| ; GFX1200-NEXT: v_frexp_mant_f32_e64 v3, |s7| ; GFX1200-NEXT: v_frexp_exp_i32_f32_e32 v6, s7 @@ -15939,11 +15939,11 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 13, v7 ; GFX1200-NEXT: v_div_fixup_f32 v6, v6, v4, 1.0 ; GFX1200-NEXT: s_cbranch_vccnz .LBB12_31 -; GFX1200-NEXT: ; %bb.28: ; %frem.loop_body85.preheader +; GFX1200-NEXT: ; %bb.28: ; %frem.loop_body.preheader ; GFX1200-NEXT: s_sub_co_i32 s11, s11, s12 ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_add_co_i32 s11, s11, 12 -; GFX1200-NEXT: .LBB12_29: ; %frem.loop_body85 +; GFX1200-NEXT: .LBB12_29: ; %frem.loop_body ; GFX1200-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1200-NEXT: v_mov_b32_e32 v8, v5 @@ -15967,7 +15967,7 @@ define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: ; %bb.30: ; %Flow ; GFX1200-NEXT: v_mov_b32_e32 v7, s11 ; GFX1200-NEXT: v_mov_b32_e32 v5, v8 -; GFX1200-NEXT: .LBB12_31: ; %frem.loop_exit86 +; GFX1200-NEXT: .LBB12_31: ; %frem.loop_exit ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-NEXT: v_add_nc_u32_e32 v7, -11, v7 ; GFX1200-NEXT: v_ldexp_f32 v5, v5, v7 @@ -16048,7 +16048,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_cmp_ngt_f64_e64 s[0:1], |v[0:1]|, |v[4:5]| ; SI-NEXT: s_and_b64 vcc, exec, s[0:1] ; SI-NEXT: s_cbranch_vccz .LBB13_2 -; SI-NEXT: ; %bb.1: ; %frem.else +; SI-NEXT: ; %bb.1: ; %frem.else16 ; SI-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; SI-NEXT: v_cmp_eq_f64_e64 vcc, |v[0:1]|, |v[4:5]| ; SI-NEXT: v_cndmask_b32_e32 v9, v1, v8, vcc @@ -16059,7 +16059,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) 
%out, ptr addrspace(1) %i ; SI-NEXT: .LBB13_2: ; SI-NEXT: ; implicit-def: $vgpr8_vgpr9 ; SI-NEXT: s_mov_b64 vcc, 0 -; SI-NEXT: .LBB13_3: ; %frem.compute +; SI-NEXT: .LBB13_3: ; %frem.compute15 ; SI-NEXT: s_brev_b32 s5, -2 ; SI-NEXT: v_and_b32_e32 v10, 0x7fffffff, v1 ; SI-NEXT: s_mov_b32 s0, 0 @@ -16105,13 +16105,13 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; SI-NEXT: s_cmp_lt_i32 s6, 27 ; SI-NEXT: s_cbranch_scc1 .LBB13_7 -; SI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; SI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; SI-NEXT: s_sub_i32 s0, s3, s7 ; SI-NEXT: s_add_i32 s6, s0, 26 ; SI-NEXT: s_mov_b32 s3, 0x432fffff ; SI-NEXT: v_mov_b32_e32 v18, 0x43300000 ; SI-NEXT: v_mov_b32_e32 v14, 0 -; SI-NEXT: .LBB13_5: ; %frem.loop_body +; SI-NEXT: .LBB13_5: ; %frem.loop_body23 ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: v_mov_b32_e32 v17, v11 ; SI-NEXT: v_mov_b32_e32 v16, v10 @@ -16134,7 +16134,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: ; %bb.6: ; %Flow51 ; SI-NEXT: v_mov_b32_e32 v10, v16 ; SI-NEXT: v_mov_b32_e32 v11, v17 -; SI-NEXT: .LBB13_7: ; %frem.loop_exit +; SI-NEXT: .LBB13_7: ; %frem.loop_exit24 ; SI-NEXT: s_sub_i32 s0, s6, 25 ; SI-NEXT: v_ldexp_f64 v[10:11], v[10:11], s0 ; SI-NEXT: v_mul_f64 v[12:13], v[10:11], v[12:13] @@ -16160,7 +16160,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_cmp_ngt_f64_e64 s[0:1], |v[2:3]|, |v[6:7]| ; SI-NEXT: s_and_b64 vcc, exec, s[0:1] ; SI-NEXT: s_cbranch_vccz .LBB13_10 -; SI-NEXT: ; %bb.9: ; %frem.else16 +; SI-NEXT: ; %bb.9: ; %frem.else ; SI-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; SI-NEXT: v_cmp_eq_f64_e64 vcc, |v[2:3]|, |v[6:7]| ; SI-NEXT: v_cndmask_b32_e32 v11, v3, v10, vcc @@ -16171,7 +16171,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: .LBB13_10: ; SI-NEXT: ; implicit-def: $vgpr10_vgpr11 ; SI-NEXT: s_mov_b64 vcc, 0 -; SI-NEXT: .LBB13_11: ; %frem.compute15 +; SI-NEXT: .LBB13_11: ; %frem.compute ; SI-NEXT: s_brev_b32 s5, -2 ; SI-NEXT: v_and_b32_e32 v12, 0x7fffffff, v3 ; SI-NEXT: s_mov_b32 s0, 0 @@ -16217,13 +16217,13 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; SI-NEXT: s_cmp_lt_i32 s6, 27 ; SI-NEXT: s_cbranch_scc1 .LBB13_15 -; SI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; SI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; SI-NEXT: s_sub_i32 s0, s3, s7 ; SI-NEXT: s_add_i32 s6, s0, 26 ; SI-NEXT: s_mov_b32 s3, 0x432fffff ; SI-NEXT: v_mov_b32_e32 v20, 0x43300000 ; SI-NEXT: v_mov_b32_e32 v16, 0 -; SI-NEXT: .LBB13_13: ; %frem.loop_body23 +; SI-NEXT: .LBB13_13: ; %frem.loop_body ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: v_mov_b32_e32 v19, v13 ; SI-NEXT: v_mov_b32_e32 v18, v12 @@ -16246,7 +16246,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; SI-NEXT: ; %bb.14: ; %Flow ; SI-NEXT: v_mov_b32_e32 v12, v18 ; SI-NEXT: v_mov_b32_e32 v13, v19 -; SI-NEXT: .LBB13_15: ; %frem.loop_exit24 +; SI-NEXT: .LBB13_15: ; %frem.loop_exit ; SI-NEXT: s_sub_i32 s0, s6, 25 ; SI-NEXT: v_ldexp_f64 v[12:13], v[12:13], s0 ; SI-NEXT: v_mul_f64 v[14:15], v[12:13], v[14:15] @@ -16304,7 +16304,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ngt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]| ; CI-NEXT: s_and_b64 vcc, exec, 
s[2:3] ; CI-NEXT: s_cbranch_vccz .LBB13_2 -; CI-NEXT: ; %bb.1: ; %frem.else +; CI-NEXT: ; %bb.1: ; %frem.else16 ; CI-NEXT: v_cmp_eq_f64_e64 vcc, |v[0:1]|, |v[4:5]| ; CI-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; CI-NEXT: v_cndmask_b32_e32 v9, v1, v8, vcc @@ -16313,7 +16313,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB13_8 ; CI-NEXT: .LBB13_2: ; CI-NEXT: ; implicit-def: $vgpr8_vgpr9 -; CI-NEXT: .LBB13_3: ; %frem.compute +; CI-NEXT: .LBB13_3: ; %frem.compute15 ; CI-NEXT: v_frexp_mant_f64_e64 v[8:9], |v[0:1]| ; CI-NEXT: v_frexp_exp_i32_f64_e32 v15, v[4:5] ; CI-NEXT: v_frexp_exp_i32_f64_e32 v14, v[0:1] @@ -16337,10 +16337,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_gt_i32_e32 vcc, 27, v17 ; CI-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB13_7 -; CI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; CI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; CI-NEXT: v_sub_i32_e32 v14, vcc, v14, v15 ; CI-NEXT: v_add_i32_e32 v17, vcc, 26, v14 -; CI-NEXT: .LBB13_5: ; %frem.loop_body +; CI-NEXT: .LBB13_5: ; %frem.loop_body23 ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v15, v11 ; CI-NEXT: v_mov_b32_e32 v14, v10 @@ -16358,7 +16358,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: ; %bb.6: ; %Flow51 ; CI-NEXT: v_mov_b32_e32 v10, v14 ; CI-NEXT: v_mov_b32_e32 v11, v15 -; CI-NEXT: .LBB13_7: ; %frem.loop_exit +; CI-NEXT: .LBB13_7: ; %frem.loop_exit24 ; CI-NEXT: v_subrev_i32_e32 v14, vcc, 25, v17 ; CI-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14 ; CI-NEXT: s_brev_b32 s2, -2 @@ -16375,7 +16375,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_ngt_f64_e64 s[2:3], |v[2:3]|, |v[6:7]| ; CI-NEXT: s_and_b64 vcc, exec, s[2:3] ; CI-NEXT: s_cbranch_vccz .LBB13_10 -; CI-NEXT: ; %bb.9: ; %frem.else16 +; CI-NEXT: ; %bb.9: ; %frem.else ; CI-NEXT: v_cmp_eq_f64_e64 vcc, |v[2:3]|, |v[6:7]| ; CI-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; CI-NEXT: v_cndmask_b32_e32 v11, v3, v10, vcc @@ -16384,7 +16384,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: s_branch .LBB13_16 ; CI-NEXT: .LBB13_10: ; CI-NEXT: ; implicit-def: $vgpr10_vgpr11 -; CI-NEXT: .LBB13_11: ; %frem.compute15 +; CI-NEXT: .LBB13_11: ; %frem.compute ; CI-NEXT: v_frexp_mant_f64_e64 v[10:11], |v[2:3]| ; CI-NEXT: v_frexp_exp_i32_f64_e32 v17, v[6:7] ; CI-NEXT: v_frexp_exp_i32_f64_e32 v16, v[2:3] @@ -16408,10 +16408,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: v_cmp_gt_i32_e32 vcc, 27, v19 ; CI-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; CI-NEXT: s_cbranch_vccnz .LBB13_15 -; CI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; CI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; CI-NEXT: v_sub_i32_e32 v16, vcc, v16, v17 ; CI-NEXT: v_add_i32_e32 v19, vcc, 26, v16 -; CI-NEXT: .LBB13_13: ; %frem.loop_body23 +; CI-NEXT: .LBB13_13: ; %frem.loop_body ; CI-NEXT: ; =>This Inner Loop Header: Depth=1 ; CI-NEXT: v_mov_b32_e32 v17, v13 ; CI-NEXT: v_mov_b32_e32 v16, v12 @@ -16429,7 +16429,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; CI-NEXT: ; %bb.14: ; %Flow ; CI-NEXT: v_mov_b32_e32 v12, v16 ; CI-NEXT: v_mov_b32_e32 v13, v17 -; CI-NEXT: .LBB13_15: ; %frem.loop_exit24 +; CI-NEXT: .LBB13_15: ; %frem.loop_exit ; CI-NEXT: v_subrev_i32_e32 v16, vcc, 25, v19 ; CI-NEXT: 
v_ldexp_f64 v[12:13], v[12:13], v16 ; CI-NEXT: s_brev_b32 s2, -2 @@ -16478,7 +16478,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ngt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]| ; VI-NEXT: s_and_b64 vcc, exec, s[2:3] ; VI-NEXT: s_cbranch_vccz .LBB13_2 -; VI-NEXT: ; %bb.1: ; %frem.else +; VI-NEXT: ; %bb.1: ; %frem.else16 ; VI-NEXT: v_cmp_eq_f64_e64 vcc, |v[0:1]|, |v[4:5]| ; VI-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; VI-NEXT: v_cndmask_b32_e32 v9, v1, v8, vcc @@ -16487,7 +16487,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB13_8 ; VI-NEXT: .LBB13_2: ; VI-NEXT: ; implicit-def: $vgpr8_vgpr9 -; VI-NEXT: .LBB13_3: ; %frem.compute +; VI-NEXT: .LBB13_3: ; %frem.compute15 ; VI-NEXT: v_frexp_mant_f64_e64 v[8:9], |v[0:1]| ; VI-NEXT: v_frexp_exp_i32_f64_e32 v15, v[4:5] ; VI-NEXT: v_frexp_exp_i32_f64_e32 v14, v[0:1] @@ -16511,10 +16511,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_gt_i32_e32 vcc, 27, v17 ; VI-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB13_7 -; VI-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; VI-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; VI-NEXT: v_sub_u32_e32 v14, vcc, v14, v15 ; VI-NEXT: v_add_u32_e32 v17, vcc, 26, v14 -; VI-NEXT: .LBB13_5: ; %frem.loop_body +; VI-NEXT: .LBB13_5: ; %frem.loop_body23 ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v15, v11 ; VI-NEXT: v_mov_b32_e32 v14, v10 @@ -16532,7 +16532,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: ; %bb.6: ; %Flow51 ; VI-NEXT: v_mov_b32_e32 v10, v14 ; VI-NEXT: v_mov_b32_e32 v11, v15 -; VI-NEXT: .LBB13_7: ; %frem.loop_exit +; VI-NEXT: .LBB13_7: ; %frem.loop_exit24 ; VI-NEXT: v_subrev_u32_e32 v14, vcc, 25, v17 ; VI-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14 ; VI-NEXT: s_brev_b32 s2, -2 @@ -16549,7 +16549,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_ngt_f64_e64 s[2:3], |v[2:3]|, |v[6:7]| ; VI-NEXT: s_and_b64 vcc, exec, s[2:3] ; VI-NEXT: s_cbranch_vccz .LBB13_10 -; VI-NEXT: ; %bb.9: ; %frem.else16 +; VI-NEXT: ; %bb.9: ; %frem.else ; VI-NEXT: v_cmp_eq_f64_e64 vcc, |v[2:3]|, |v[6:7]| ; VI-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; VI-NEXT: v_cndmask_b32_e32 v11, v3, v10, vcc @@ -16558,7 +16558,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: s_branch .LBB13_16 ; VI-NEXT: .LBB13_10: ; VI-NEXT: ; implicit-def: $vgpr10_vgpr11 -; VI-NEXT: .LBB13_11: ; %frem.compute15 +; VI-NEXT: .LBB13_11: ; %frem.compute ; VI-NEXT: v_frexp_mant_f64_e64 v[10:11], |v[2:3]| ; VI-NEXT: v_frexp_exp_i32_f64_e32 v17, v[6:7] ; VI-NEXT: v_frexp_exp_i32_f64_e32 v16, v[2:3] @@ -16582,10 +16582,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; VI-NEXT: v_cmp_gt_i32_e32 vcc, 27, v19 ; VI-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; VI-NEXT: s_cbranch_vccnz .LBB13_15 -; VI-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; VI-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; VI-NEXT: v_sub_u32_e32 v16, vcc, v16, v17 ; VI-NEXT: v_add_u32_e32 v19, vcc, 26, v16 -; VI-NEXT: .LBB13_13: ; %frem.loop_body23 +; VI-NEXT: .LBB13_13: ; %frem.loop_body ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: v_mov_b32_e32 v17, v13 ; VI-NEXT: v_mov_b32_e32 v16, v12 @@ -16603,7 +16603,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, 
ptr addrspace(1) %i ; VI-NEXT: ; %bb.14: ; %Flow ; VI-NEXT: v_mov_b32_e32 v12, v16 ; VI-NEXT: v_mov_b32_e32 v13, v17 -; VI-NEXT: .LBB13_15: ; %frem.loop_exit24 +; VI-NEXT: .LBB13_15: ; %frem.loop_exit ; VI-NEXT: v_subrev_u32_e32 v16, vcc, 25, v19 ; VI-NEXT: v_ldexp_f64 v[12:13], v[12:13], v16 ; VI-NEXT: s_brev_b32 s2, -2 @@ -16647,7 +16647,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_ngt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]| ; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3] ; GFX9-NEXT: s_cbranch_vccz .LBB13_2 -; GFX9-NEXT: ; %bb.1: ; %frem.else +; GFX9-NEXT: ; %bb.1: ; %frem.else16 ; GFX9-NEXT: v_cmp_eq_f64_e64 vcc, |v[0:1]|, |v[4:5]| ; GFX9-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; GFX9-NEXT: v_cndmask_b32_e32 v9, v1, v8, vcc @@ -16656,7 +16656,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB13_8 ; GFX9-NEXT: .LBB13_2: ; GFX9-NEXT: ; implicit-def: $vgpr8_vgpr9 -; GFX9-NEXT: .LBB13_3: ; %frem.compute +; GFX9-NEXT: .LBB13_3: ; %frem.compute15 ; GFX9-NEXT: v_frexp_mant_f64_e64 v[8:9], |v[0:1]| ; GFX9-NEXT: v_frexp_exp_i32_f64_e32 v15, v[4:5] ; GFX9-NEXT: v_frexp_exp_i32_f64_e32 v14, v[0:1] @@ -16680,10 +16680,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 27, v17 ; GFX9-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB13_7 -; GFX9-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX9-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; GFX9-NEXT: v_sub_u32_e32 v14, v14, v15 ; GFX9-NEXT: v_add_u32_e32 v17, 26, v14 -; GFX9-NEXT: .LBB13_5: ; %frem.loop_body +; GFX9-NEXT: .LBB13_5: ; %frem.loop_body23 ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v15, v11 ; GFX9-NEXT: v_mov_b32_e32 v14, v10 @@ -16701,7 +16701,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: ; %bb.6: ; %Flow51 ; GFX9-NEXT: v_mov_b32_e32 v10, v14 ; GFX9-NEXT: v_mov_b32_e32 v11, v15 -; GFX9-NEXT: .LBB13_7: ; %frem.loop_exit +; GFX9-NEXT: .LBB13_7: ; %frem.loop_exit24 ; GFX9-NEXT: v_subrev_u32_e32 v14, 25, v17 ; GFX9-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14 ; GFX9-NEXT: s_brev_b32 s2, -2 @@ -16718,7 +16718,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_ngt_f64_e64 s[2:3], |v[2:3]|, |v[6:7]| ; GFX9-NEXT: s_and_b64 vcc, exec, s[2:3] ; GFX9-NEXT: s_cbranch_vccz .LBB13_10 -; GFX9-NEXT: ; %bb.9: ; %frem.else16 +; GFX9-NEXT: ; %bb.9: ; %frem.else ; GFX9-NEXT: v_cmp_eq_f64_e64 vcc, |v[2:3]|, |v[6:7]| ; GFX9-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; GFX9-NEXT: v_cndmask_b32_e32 v11, v3, v10, vcc @@ -16727,7 +16727,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_branch .LBB13_16 ; GFX9-NEXT: .LBB13_10: ; GFX9-NEXT: ; implicit-def: $vgpr10_vgpr11 -; GFX9-NEXT: .LBB13_11: ; %frem.compute15 +; GFX9-NEXT: .LBB13_11: ; %frem.compute ; GFX9-NEXT: v_frexp_mant_f64_e64 v[10:11], |v[2:3]| ; GFX9-NEXT: v_frexp_exp_i32_f64_e32 v17, v[6:7] ; GFX9-NEXT: v_frexp_exp_i32_f64_e32 v16, v[2:3] @@ -16751,10 +16751,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 27, v19 ; GFX9-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; GFX9-NEXT: s_cbranch_vccnz .LBB13_15 -; GFX9-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX9-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX9-NEXT: 
v_sub_u32_e32 v16, v16, v17 ; GFX9-NEXT: v_add_u32_e32 v19, 26, v16 -; GFX9-NEXT: .LBB13_13: ; %frem.loop_body23 +; GFX9-NEXT: .LBB13_13: ; %frem.loop_body ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: v_mov_b32_e32 v17, v13 ; GFX9-NEXT: v_mov_b32_e32 v16, v12 @@ -16772,7 +16772,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: ; %bb.14: ; %Flow ; GFX9-NEXT: v_mov_b32_e32 v12, v16 ; GFX9-NEXT: v_mov_b32_e32 v13, v17 -; GFX9-NEXT: .LBB13_15: ; %frem.loop_exit24 +; GFX9-NEXT: .LBB13_15: ; %frem.loop_exit ; GFX9-NEXT: v_subrev_u32_e32 v16, 25, v19 ; GFX9-NEXT: v_ldexp_f64 v[12:13], v[12:13], v16 ; GFX9-NEXT: s_brev_b32 s2, -2 @@ -16817,7 +16817,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_ngt_f64_e64 s2, |v[0:1]|, |v[4:5]| ; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX10-NEXT: s_cbranch_vccz .LBB13_2 -; GFX10-NEXT: ; %bb.1: ; %frem.else +; GFX10-NEXT: ; %bb.1: ; %frem.else16 ; GFX10-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[0:1]|, |v[4:5]| ; GFX10-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; GFX10-NEXT: v_cndmask_b32_e32 v9, v1, v8, vcc_lo @@ -16826,7 +16826,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB13_8 ; GFX10-NEXT: .LBB13_2: ; GFX10-NEXT: ; implicit-def: $vgpr8_vgpr9 -; GFX10-NEXT: .LBB13_3: ; %frem.compute +; GFX10-NEXT: .LBB13_3: ; %frem.compute15 ; GFX10-NEXT: v_frexp_mant_f64_e64 v[8:9], |v[0:1]| ; GFX10-NEXT: v_frexp_exp_i32_f64_e32 v13, v[4:5] ; GFX10-NEXT: v_frexp_exp_i32_f64_e32 v12, v[0:1] @@ -16851,10 +16851,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 27, v17 ; GFX10-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB13_7 -; GFX10-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX10-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 26 -; GFX10-NEXT: .LBB13_5: ; %frem.loop_body +; GFX10-NEXT: .LBB13_5: ; %frem.loop_body23 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v15, v11 ; GFX10-NEXT: v_mov_b32_e32 v14, v10 @@ -16873,7 +16873,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_mov_b32_e32 v10, v14 ; GFX10-NEXT: v_mov_b32_e32 v17, s2 ; GFX10-NEXT: v_mov_b32_e32 v11, v15 -; GFX10-NEXT: .LBB13_7: ; %frem.loop_exit +; GFX10-NEXT: .LBB13_7: ; %frem.loop_exit24 ; GFX10-NEXT: v_subrev_nc_u32_e32 v14, 25, v17 ; GFX10-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14 ; GFX10-NEXT: v_mul_f64 v[12:13], v[10:11], v[12:13] @@ -16889,7 +16889,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_ngt_f64_e64 s2, |v[2:3]|, |v[6:7]| ; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX10-NEXT: s_cbranch_vccz .LBB13_10 -; GFX10-NEXT: ; %bb.9: ; %frem.else16 +; GFX10-NEXT: ; %bb.9: ; %frem.else ; GFX10-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[2:3]|, |v[6:7]| ; GFX10-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; GFX10-NEXT: v_cndmask_b32_e32 v11, v3, v10, vcc_lo @@ -16898,7 +16898,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: s_branch .LBB13_16 ; GFX10-NEXT: .LBB13_10: ; GFX10-NEXT: ; implicit-def: $vgpr10_vgpr11 -; GFX10-NEXT: .LBB13_11: ; %frem.compute15 +; GFX10-NEXT: .LBB13_11: ; %frem.compute ; GFX10-NEXT: v_frexp_mant_f64_e64 v[10:11], |v[2:3]| ; GFX10-NEXT: 
v_frexp_exp_i32_f64_e32 v15, v[6:7] ; GFX10-NEXT: v_frexp_exp_i32_f64_e32 v14, v[2:3] @@ -16923,10 +16923,10 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 27, v19 ; GFX10-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; GFX10-NEXT: s_cbranch_vccnz .LBB13_15 -; GFX10-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX10-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX10-NEXT: s_sub_i32 s2, s2, s3 ; GFX10-NEXT: s_add_i32 s2, s2, 26 -; GFX10-NEXT: .LBB13_13: ; %frem.loop_body23 +; GFX10-NEXT: .LBB13_13: ; %frem.loop_body ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: v_mov_b32_e32 v17, v13 ; GFX10-NEXT: v_mov_b32_e32 v16, v12 @@ -16945,7 +16945,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX10-NEXT: v_mov_b32_e32 v12, v16 ; GFX10-NEXT: v_mov_b32_e32 v19, s2 ; GFX10-NEXT: v_mov_b32_e32 v13, v17 -; GFX10-NEXT: .LBB13_15: ; %frem.loop_exit24 +; GFX10-NEXT: .LBB13_15: ; %frem.loop_exit ; GFX10-NEXT: v_subrev_nc_u32_e32 v16, 25, v19 ; GFX10-NEXT: v_ldexp_f64 v[12:13], v[12:13], v16 ; GFX10-NEXT: v_mul_f64 v[14:15], v[12:13], v[14:15] @@ -16986,7 +16986,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: v_cmp_ngt_f64_e64 s2, |v[0:1]|, |v[4:5]| ; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX11-NEXT: s_cbranch_vccz .LBB13_2 -; GFX11-NEXT: ; %bb.1: ; %frem.else +; GFX11-NEXT: ; %bb.1: ; %frem.else16 ; GFX11-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[0:1]|, |v[4:5]| ; GFX11-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) @@ -16996,7 +16996,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_branch .LBB13_8 ; GFX11-NEXT: .LBB13_2: ; GFX11-NEXT: ; implicit-def: $vgpr8_vgpr9 -; GFX11-NEXT: .LBB13_3: ; %frem.compute +; GFX11-NEXT: .LBB13_3: ; %frem.compute15 ; GFX11-NEXT: v_frexp_mant_f64_e64 v[8:9], |v[0:1]| ; GFX11-NEXT: v_frexp_exp_i32_f64_e32 v13, v[4:5] ; GFX11-NEXT: v_frexp_exp_i32_f64_e32 v12, v[0:1] @@ -17029,12 +17029,12 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; GFX11-NEXT: s_cbranch_vccnz .LBB13_7 -; GFX11-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX11-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; GFX11-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_add_i32 s2, s2, 26 ; GFX11-NEXT: .p2align 6 -; GFX11-NEXT: .LBB13_5: ; %frem.loop_body +; GFX11-NEXT: .LBB13_5: ; %frem.loop_body23 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_dual_mov_b32 v15, v11 :: v_dual_mov_b32 v14, v10 @@ -17054,7 +17054,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: ; %bb.6: ; %Flow51 ; GFX11-NEXT: v_dual_mov_b32 v17, s2 :: v_dual_mov_b32 v10, v14 ; GFX11-NEXT: v_mov_b32_e32 v11, v15 -; GFX11-NEXT: .LBB13_7: ; %frem.loop_exit +; GFX11-NEXT: .LBB13_7: ; %frem.loop_exit24 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_subrev_nc_u32_e32 v14, 25, v17 ; GFX11-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14 @@ -17074,7 +17074,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; 
GFX11-NEXT: v_cmp_ngt_f64_e64 s2, |v[2:3]|, |v[6:7]| ; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX11-NEXT: s_cbranch_vccz .LBB13_10 -; GFX11-NEXT: ; %bb.9: ; %frem.else16 +; GFX11-NEXT: ; %bb.9: ; %frem.else ; GFX11-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[2:3]|, |v[6:7]| ; GFX11-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) @@ -17084,7 +17084,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_branch .LBB13_16 ; GFX11-NEXT: .LBB13_10: ; GFX11-NEXT: ; implicit-def: $vgpr10_vgpr11 -; GFX11-NEXT: .LBB13_11: ; %frem.compute15 +; GFX11-NEXT: .LBB13_11: ; %frem.compute ; GFX11-NEXT: v_frexp_mant_f64_e64 v[10:11], |v[2:3]| ; GFX11-NEXT: v_frexp_exp_i32_f64_e32 v15, v[6:7] ; GFX11-NEXT: v_frexp_exp_i32_f64_e32 v14, v[2:3] @@ -17117,12 +17117,12 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; GFX11-NEXT: s_cbranch_vccnz .LBB13_15 -; GFX11-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX11-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX11-NEXT: s_sub_i32 s2, s2, s3 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_add_i32 s2, s2, 26 ; GFX11-NEXT: .p2align 6 -; GFX11-NEXT: .LBB13_13: ; %frem.loop_body23 +; GFX11-NEXT: .LBB13_13: ; %frem.loop_body ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_dual_mov_b32 v17, v13 :: v_dual_mov_b32 v16, v12 @@ -17142,7 +17142,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX11-NEXT: ; %bb.14: ; %Flow ; GFX11-NEXT: v_dual_mov_b32 v19, s2 :: v_dual_mov_b32 v12, v16 ; GFX11-NEXT: v_mov_b32_e32 v13, v17 -; GFX11-NEXT: .LBB13_15: ; %frem.loop_exit24 +; GFX11-NEXT: .LBB13_15: ; %frem.loop_exit ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_subrev_nc_u32_e32 v16, 25, v19 ; GFX11-NEXT: v_ldexp_f64 v[12:13], v[12:13], v16 @@ -17187,7 +17187,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: v_cmp_ngt_f64_e64 s2, |v[0:1]|, |v[4:5]| ; GFX1150-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX1150-NEXT: s_cbranch_vccz .LBB13_2 -; GFX1150-NEXT: ; %bb.1: ; %frem.else +; GFX1150-NEXT: ; %bb.1: ; %frem.else16 ; GFX1150-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[0:1]|, |v[4:5]| ; GFX1150-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) @@ -17197,7 +17197,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_branch .LBB13_8 ; GFX1150-NEXT: .LBB13_2: ; GFX1150-NEXT: ; implicit-def: $vgpr8_vgpr9 -; GFX1150-NEXT: .LBB13_3: ; %frem.compute +; GFX1150-NEXT: .LBB13_3: ; %frem.compute15 ; GFX1150-NEXT: v_frexp_mant_f64_e64 v[8:9], |v[0:1]| ; GFX1150-NEXT: v_frexp_exp_i32_f64_e32 v13, v[4:5] ; GFX1150-NEXT: v_frexp_exp_i32_f64_e32 v12, v[0:1] @@ -17229,12 +17229,12 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 27, v17 ; GFX1150-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; GFX1150-NEXT: s_cbranch_vccnz .LBB13_7 -; GFX1150-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1150-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; GFX1150-NEXT: s_sub_i32 s2, s2, s3 ; 
GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_add_i32 s2, s2, 26 ; GFX1150-NEXT: .p2align 6 -; GFX1150-NEXT: .LBB13_5: ; %frem.loop_body +; GFX1150-NEXT: .LBB13_5: ; %frem.loop_body23 ; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-NEXT: v_dual_mov_b32 v15, v11 :: v_dual_mov_b32 v14, v10 @@ -17254,7 +17254,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: ; %bb.6: ; %Flow51 ; GFX1150-NEXT: v_dual_mov_b32 v17, s2 :: v_dual_mov_b32 v10, v14 ; GFX1150-NEXT: v_mov_b32_e32 v11, v15 -; GFX1150-NEXT: .LBB13_7: ; %frem.loop_exit +; GFX1150-NEXT: .LBB13_7: ; %frem.loop_exit24 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-NEXT: v_subrev_nc_u32_e32 v14, 25, v17 ; GFX1150-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14 @@ -17274,7 +17274,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: v_cmp_ngt_f64_e64 s2, |v[2:3]|, |v[6:7]| ; GFX1150-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX1150-NEXT: s_cbranch_vccz .LBB13_10 -; GFX1150-NEXT: ; %bb.9: ; %frem.else16 +; GFX1150-NEXT: ; %bb.9: ; %frem.else ; GFX1150-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[2:3]|, |v[6:7]| ; GFX1150-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) @@ -17284,7 +17284,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: s_branch .LBB13_16 ; GFX1150-NEXT: .LBB13_10: ; GFX1150-NEXT: ; implicit-def: $vgpr10_vgpr11 -; GFX1150-NEXT: .LBB13_11: ; %frem.compute15 +; GFX1150-NEXT: .LBB13_11: ; %frem.compute ; GFX1150-NEXT: v_frexp_mant_f64_e64 v[10:11], |v[2:3]| ; GFX1150-NEXT: v_frexp_exp_i32_f64_e32 v15, v[6:7] ; GFX1150-NEXT: v_frexp_exp_i32_f64_e32 v14, v[2:3] @@ -17316,12 +17316,12 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: v_cmp_gt_i32_e32 vcc_lo, 27, v19 ; GFX1150-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; GFX1150-NEXT: s_cbranch_vccnz .LBB13_15 -; GFX1150-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX1150-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX1150-NEXT: s_sub_i32 s2, s2, s3 ; GFX1150-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1150-NEXT: s_add_i32 s2, s2, 26 ; GFX1150-NEXT: .p2align 6 -; GFX1150-NEXT: .LBB13_13: ; %frem.loop_body23 +; GFX1150-NEXT: .LBB13_13: ; %frem.loop_body ; GFX1150-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1150-NEXT: v_dual_mov_b32 v17, v13 :: v_dual_mov_b32 v16, v12 @@ -17341,7 +17341,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1150-NEXT: ; %bb.14: ; %Flow ; GFX1150-NEXT: v_dual_mov_b32 v19, s2 :: v_dual_mov_b32 v12, v16 ; GFX1150-NEXT: v_mov_b32_e32 v13, v17 -; GFX1150-NEXT: .LBB13_15: ; %frem.loop_exit24 +; GFX1150-NEXT: .LBB13_15: ; %frem.loop_exit ; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-NEXT: v_subrev_nc_u32_e32 v16, 25, v19 ; GFX1150-NEXT: v_ldexp_f64 v[12:13], v[12:13], v16 @@ -17386,7 +17386,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: v_cmp_ngt_f64_e64 s2, |v[0:1]|, |v[4:5]| ; GFX1200-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX1200-NEXT: s_cbranch_vccz .LBB13_2 -; GFX1200-NEXT: ; 
%bb.1: ; %frem.else +; GFX1200-NEXT: ; %bb.1: ; %frem.else16 ; GFX1200-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[0:1]|, |v[4:5]| ; GFX1200-NEXT: v_and_b32_e32 v8, 0x80000000, v1 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) @@ -17396,7 +17396,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_branch .LBB13_8 ; GFX1200-NEXT: .LBB13_2: ; GFX1200-NEXT: ; implicit-def: $vgpr8_vgpr9 -; GFX1200-NEXT: .LBB13_3: ; %frem.compute +; GFX1200-NEXT: .LBB13_3: ; %frem.compute15 ; GFX1200-NEXT: v_frexp_mant_f64_e64 v[8:9], |v[0:1]| ; GFX1200-NEXT: v_frexp_exp_i32_f64_e32 v13, v[4:5] ; GFX1200-NEXT: v_frexp_exp_i32_f64_e32 v12, v[0:1] @@ -17429,11 +17429,11 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 27, v17 ; GFX1200-NEXT: v_div_fixup_f64 v[12:13], v[12:13], v[8:9], 1.0 ; GFX1200-NEXT: s_cbranch_vccnz .LBB13_7 -; GFX1200-NEXT: ; %bb.4: ; %frem.loop_body.preheader +; GFX1200-NEXT: ; %bb.4: ; %frem.loop_body23.preheader ; GFX1200-NEXT: s_sub_co_i32 s2, s2, s3 ; GFX1200-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1200-NEXT: s_add_co_i32 s2, s2, 26 -; GFX1200-NEXT: .LBB13_5: ; %frem.loop_body +; GFX1200-NEXT: .LBB13_5: ; %frem.loop_body23 ; GFX1200-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1200-NEXT: v_dual_mov_b32 v15, v11 :: v_dual_mov_b32 v14, v10 @@ -17454,7 +17454,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: ; %bb.6: ; %Flow51 ; GFX1200-NEXT: v_dual_mov_b32 v17, s2 :: v_dual_mov_b32 v10, v14 ; GFX1200-NEXT: v_mov_b32_e32 v11, v15 -; GFX1200-NEXT: .LBB13_7: ; %frem.loop_exit +; GFX1200-NEXT: .LBB13_7: ; %frem.loop_exit24 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-NEXT: v_subrev_nc_u32_e32 v14, 25, v17 ; GFX1200-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14 @@ -17476,7 +17476,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_and_b32 vcc_lo, exec_lo, s2 ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_cbranch_vccz .LBB13_10 -; GFX1200-NEXT: ; %bb.9: ; %frem.else16 +; GFX1200-NEXT: ; %bb.9: ; %frem.else ; GFX1200-NEXT: v_cmp_eq_f64_e64 vcc_lo, |v[2:3]|, |v[6:7]| ; GFX1200-NEXT: v_and_b32_e32 v10, 0x80000000, v3 ; GFX1200-NEXT: s_wait_alu 0xfffd @@ -17487,7 +17487,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: s_branch .LBB13_16 ; GFX1200-NEXT: .LBB13_10: ; GFX1200-NEXT: ; implicit-def: $vgpr10_vgpr11 -; GFX1200-NEXT: .LBB13_11: ; %frem.compute15 +; GFX1200-NEXT: .LBB13_11: ; %frem.compute ; GFX1200-NEXT: v_frexp_mant_f64_e64 v[10:11], |v[2:3]| ; GFX1200-NEXT: v_frexp_exp_i32_f64_e32 v15, v[6:7] ; GFX1200-NEXT: v_frexp_exp_i32_f64_e32 v14, v[2:3] @@ -17520,11 +17520,11 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: v_cmp_gt_i32_e32 vcc_lo, 27, v19 ; GFX1200-NEXT: v_div_fixup_f64 v[14:15], v[14:15], v[10:11], 1.0 ; GFX1200-NEXT: s_cbranch_vccnz .LBB13_15 -; GFX1200-NEXT: ; %bb.12: ; %frem.loop_body23.preheader +; GFX1200-NEXT: ; %bb.12: ; %frem.loop_body.preheader ; GFX1200-NEXT: s_sub_co_i32 s2, s2, s3 ; GFX1200-NEXT: s_wait_alu 0xfffe ; GFX1200-NEXT: s_add_co_i32 s2, s2, 26 -; GFX1200-NEXT: .LBB13_13: ; %frem.loop_body23 +; GFX1200-NEXT: .LBB13_13: ; %frem.loop_body ; GFX1200-NEXT: 
; =>This Inner Loop Header: Depth=1 ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1200-NEXT: v_dual_mov_b32 v17, v13 :: v_dual_mov_b32 v16, v12 @@ -17547,7 +17547,7 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX1200-NEXT: ; %bb.14: ; %Flow ; GFX1200-NEXT: v_dual_mov_b32 v19, s2 :: v_dual_mov_b32 v12, v16 ; GFX1200-NEXT: v_mov_b32_e32 v13, v17 -; GFX1200-NEXT: .LBB13_15: ; %frem.loop_exit24 +; GFX1200-NEXT: .LBB13_15: ; %frem.loop_exit ; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-NEXT: v_subrev_nc_u32_e32 v16, 25, v19 ; GFX1200-NEXT: v_ldexp_f64 v[12:13], v[12:13], v16 diff --git a/llvm/test/CodeGen/AMDGPU/function-args.ll b/llvm/test/CodeGen/AMDGPU/function-args.ll index 3c41cc4..5babe9f 100644 --- a/llvm/test/CodeGen/AMDGPU/function-args.ll +++ b/llvm/test/CodeGen/AMDGPU/function-args.ll @@ -1111,15 +1111,11 @@ define void @void_func_v4i8(<4 x i8> %arg0) #0 { ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0 ; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 0 -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.h, v0.h ; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h ; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -1190,18 +1186,15 @@ define void @void_func_v5i8(<5 x i8> %arg0) #0 { ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v3.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l ; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 4 -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l ; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h ; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: buffer_store_b8 v4, off, s[0:3], 0 ; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 0 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v3, v2 ; GFX11-TRUE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -1281,28 +1274,22 @@ define void @void_func_v8i8(<8 x i8> %arg0) #0 { ; GFX11-TRUE16-LABEL: void_func_v8i8: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, 0 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v5.h, v4.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v6.l +; GFX11-TRUE16-NEXT: 
v_and_b16 v0.h, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v6.l, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v0.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v2.l, v1.h ; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v4, v6 -; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v6.l ; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v0, v6 -; GFX11-TRUE16-NEXT: buffer_store_b64 v[1:2], off, s[0:3], 0 +; GFX11-TRUE16-NEXT: buffer_store_b64 v[2:3], off, s[0:3], 0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: void_func_v8i8: @@ -1416,44 +1403,34 @@ define void @void_func_v16i8(<16 x i8> %arg0) #0 { ; GFX11-TRUE16-LABEL: void_func_v16i8: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v15.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v14.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v13.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.l, 8, v15.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, 0 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v13.h, v12.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.l, 8, v11.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v12.l, v13.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v14.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.l, 8, v9.l -; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v12, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v10.l, v9.h -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v10.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v14.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v8.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v3.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v9, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v5.h, v4.h -; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v4.l, v5.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v14.l +; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v10.h, v15.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v12.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.l, v13.l +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v6.h, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v6.l, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v0.h, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v2.l, v1.h +; GFX11-TRUE16-NEXT: v_or_b16 v8.l, 
v0.l, v1.l ; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v14 -; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v14.l ; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v0, v14 -; GFX11-TRUE16-NEXT: buffer_store_b128 v[5:8], off, s[0:3], 0 +; GFX11-TRUE16-NEXT: buffer_store_b128 v[8:11], off, s[0:3], 0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: void_func_v16i8: @@ -1649,78 +1626,59 @@ define void @void_func_v32i8(<32 x i8> %arg0) #0 { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: scratch_load_d16_u8 v31, off, s32 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, 0 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v13.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.h, v32.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v10.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v9.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v3.h, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v32.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v12.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v11.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v10.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v9.l +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v7.h, v6.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v12, v32 -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v5.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.h, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v28.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v27.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v26.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v25.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v24.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v23.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v22.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v18.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.h, v0.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v2.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v4.l, v5.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v9, v32 -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v6.l, v7.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.h, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v30.l -; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v10, v32 -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v0.h, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v32.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v27.l -; GFX11-TRUE16-NEXT: v_and_b16 v5.l, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v24.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v6.h, v5.h -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v11, v32 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.h, v32.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.l, 8, v23.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.h, 8, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v20.l -; GFX11-TRUE16-NEXT: v_or_b16 v14.l, v7.h, v6.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.h, v32.l -; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v4.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v6.h, v5.h +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v8.l, v7.h +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v6.l, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v4.l, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v7.l, v11.l, v10.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v12.l, v11.h +; GFX11-TRUE16-NEXT: v_or_b16 v6.l, v13.l, v12.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v14.l, v13.h +; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v14.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v16.h, v15.h +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v16.l, v17.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v9.h ; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 16 -; GFX11-TRUE16-NEXT: v_or_b16 v15.l, v6.h, v5.h ; GFX11-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, -1 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v31.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v4.h, v7.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v13, v32 -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v5.l, v4.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v19.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v17.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v14, v32 -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v4.h, v8.l -; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v8.h, v5.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v15, v32 -; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v4.h, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v31.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v9, v32 +; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v10.l, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v9.l, v8.h ; GFX11-TRUE16-NEXT: buffer_store_b128 v[4:7], off, s[0:3], 0 ; GFX11-TRUE16-NEXT: s_mov_b64 s[0:1], 0 ; GFX11-TRUE16-NEXT: buffer_store_b128 v[0:3], off, s[0:3], 0 diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll index f67ab18..234eaa8 100644 --- a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll +++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll @@ -4985,21 +4985,17 @@ define amdgpu_gfx void @test_call_external_void_func_v4i8_ret() #0 { ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l ; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.h, v0.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h ; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l ; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v42, 1 ; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v42, 0 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v2 ; GFX11-TRUE16-NEXT: s_mov_b32 s32, s33 -; GFX11-TRUE16-NEXT: v_readlane_b32 s0, v42, 2 ; GFX11-TRUE16-NEXT: global_store_b32 v[40:41], v0, off ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s33 ; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s33 offset:4 +; GFX11-TRUE16-NEXT: v_readlane_b32 s0, v42, 2 ; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s1, -1 ; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s33 offset:8 ; 4-byte Folded Reload ; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s1 @@ -5243,18 +5239,14 @@ define amdgpu_gfx void @test_call_external_void_func_v5i8_ret() #0 { ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, 0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, 4 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v3, v2 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, 0 +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v1.h, v0.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l ; GFX11-TRUE16-NEXT: s_clause 0x1 -; GFX11-TRUE16-NEXT: global_store_b8 v[0:1], v4, off -; GFX11-TRUE16-NEXT: global_store_b32 v[40:41], v2, off +; GFX11-TRUE16-NEXT: global_store_b8 v[2:3], v4, off +; GFX11-TRUE16-NEXT: global_store_b32 v[40:41], v0, off ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s33 ; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s33 offset:4 @@ -5528,27 +5520,21 @@ define amdgpu_gfx void @test_call_external_void_func_v8i8_ret() #0 { ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v5.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, 0 +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v3.h, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, v4.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l ; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v5, v4 ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v1.h, v0.h -; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v4.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v0, v4 -; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v42, 1 -; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v42, 0 -; GFX11-TRUE16-NEXT: global_store_b64 v[40:41], v[1:2], off +; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v3.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v2.l, v3.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: global_store_b64 v[40:41], v[3:4], off ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s33 ; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s33 offset:4 +; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v42, 1 +; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v42, 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s32, s33 ; GFX11-TRUE16-NEXT: v_readlane_b32 s0, v42, 2 ; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s1, -1 @@ -5994,73 +5980,53 @@ define amdgpu_gfx void @test_call_external_void_func_v32i8_ret() #0 { ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, 0 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v1.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v7.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v1.h, v0.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v11.l -; GFX11-TRUE16-NEXT: v_or_b16 v13.l, v3.h, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.h, v12.l +; GFX11-TRUE16-NEXT: v_or_b16 v12.l, v3.h, v2.h ; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v8.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.h, v12.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v13, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v7.l -; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v3.h, v2.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v6.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v5.l -; GFX11-TRUE16-NEXT: v_and_b16 v3.h, 0xff, v4.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, v12.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v9, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v3.l -; GFX11-TRUE16-NEXT: v_or_b16 v4.l, v3.h, v2.h -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v2.l -; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0xff, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v12.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v4, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v1.h, v0.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.l, v1.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v31.l -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v30.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v29.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v28.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v2, v12 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v17.l -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v0.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v27.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v1.h, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v26.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v25.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v24.l -; GFX11-TRUE16-NEXT: 
v_and_b16 v4.h, 0xff, v16.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v2, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v0.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v23.l -; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v1.h, v1.l -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v22.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v21.l -; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v20.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v12 -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v0.h, v0.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v19.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v1.h, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v12.l -; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v18.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v12 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v0.h, v0.l -; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v4.h, v4.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v12.l -; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v12 +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v5.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.l, 8, v3.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v1.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v11.l, v3.h, v2.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v5.h, v4.h +; GFX11-TRUE16-NEXT: v_or_b16 v10.l, v4.l, v5.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v2.l, v3.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.h, 8, v31.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v30.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v29.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.h, 0xff, v28.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v27.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v26.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v5.l, 8, v1.l +; GFX11-TRUE16-NEXT: v_and_b16 v5.h, 0xff, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v1.h, v0.h +; GFX11-TRUE16-NEXT: v_or_b16 v3.l, v2.h, v2.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v4.h, v4.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v25.l +; GFX11-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v24.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.l, 8, v23.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.h, 0xff, v22.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.l, 8, v21.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.h, 0xff, v20.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v19.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v18.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v17.l +; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v16.l +; GFX11-TRUE16-NEXT: v_or_b16 v2.l, v0.h, v0.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v1.h, v1.l +; GFX11-TRUE16-NEXT: v_or_b16 v1.l, v4.h, v4.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.h, v6.h, v6.l +; GFX11-TRUE16-NEXT: v_or_b16 v0.l, v7.h, v7.l +; GFX11-TRUE16-NEXT: v_or_b16 v9.l, v5.h, v5.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: global_store_b128 v[42:43], v[0:3], off -; GFX11-TRUE16-NEXT: global_store_b128 v[40:41], v[5:8], off +; GFX11-TRUE16-NEXT: global_store_b128 v[40:41], v[9:12], off ; GFX11-TRUE16-NEXT: s_clause 0x3 ; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s33 ; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s33 offset:4 diff --git a/llvm/test/CodeGen/AMDGPU/global-constant.ll b/llvm/test/CodeGen/AMDGPU/global-constant.ll index 866d3a1..b04602a 100644 --- a/llvm/test/CodeGen/AMDGPU/global-constant.ll +++ 
b/llvm/test/CodeGen/AMDGPU/global-constant.ll @@ -12,21 +12,21 @@ ; Non-R600 OSes use relocations. ; GCN-DEFAULT: s_getpc_b64 s[[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]] -; GCN-DEFAULT: s_add_u32 s{{[0-9]+}}, s[[PC0_LO]], private1@rel32@lo+4 -; GCN-DEFAULT: s_addc_u32 s{{[0-9]+}}, s[[PC0_HI]], private1@rel32@hi+12 +; GCN-DEFAULT: s_add_u32 s{{[0-9]+}}, s[[PC0_LO]], .Lprivate1@rel32@lo+4 +; GCN-DEFAULT: s_addc_u32 s{{[0-9]+}}, s[[PC0_HI]], .Lprivate1@rel32@hi+12 ; GCN-DEFAULT: s_getpc_b64 s[[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]] -; GCN-DEFAULT: s_add_u32 s{{[0-9]+}}, s[[PC1_LO]], private2@rel32@lo+4 -; GCN-DEFAULT: s_addc_u32 s{{[0-9]+}}, s[[PC1_HI]], private2@rel32@hi+12 +; GCN-DEFAULT: s_add_u32 s{{[0-9]+}}, s[[PC1_LO]], .Lprivate2@rel32@lo+4 +; GCN-DEFAULT: s_addc_u32 s{{[0-9]+}}, s[[PC1_HI]], .Lprivate2@rel32@hi+12 ; MESA uses absolute relocations. -; GCN-MESA: s_add_u32 s2, private1@abs32@lo, s4 -; GCN-MESA: s_addc_u32 s3, private1@abs32@hi, s5 +; GCN-MESA: s_add_u32 s2, .Lprivate1@abs32@lo, s4 +; GCN-MESA: s_addc_u32 s3, .Lprivate1@abs32@hi, s5 ; PAL uses absolute relocations. -; GCN-PAL: s_add_u32 s2, private1@abs32@lo, s4 -; GCN-PAL: s_addc_u32 s3, private1@abs32@hi, s5 -; GCN-PAL: s_add_u32 s4, private2@abs32@lo, s4 -; GCN-PAL: s_addc_u32 s5, private2@abs32@hi, s5 +; GCN-PAL: s_add_u32 s2, .Lprivate1@abs32@lo, s4 +; GCN-PAL: s_addc_u32 s3, .Lprivate1@abs32@hi, s5 +; GCN-PAL: s_add_u32 s4, .Lprivate2@abs32@lo, s4 +; GCN-PAL: s_addc_u32 s5, .Lprivate2@abs32@hi, s5 ; R600-LABEL: private_test define amdgpu_kernel void @private_test(i32 %index, ptr addrspace(1) %out) { diff --git a/llvm/test/CodeGen/AMDGPU/global-variable-relocs.ll b/llvm/test/CodeGen/AMDGPU/global-variable-relocs.ll index b8cfcbf..6d55e79 100644 --- a/llvm/test/CodeGen/AMDGPU/global-variable-relocs.ll +++ b/llvm/test/CodeGen/AMDGPU/global-variable-relocs.ll @@ -14,8 +14,8 @@ ; CHECK-LABEL: private_test: ; CHECK: s_getpc_b64 s[[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]] -; CHECK: s_add_u32 s[[ADDR_LO:[0-9]+]], s[[PC_LO]], private@rel32@lo+8 -; CHECK: s_addc_u32 s[[ADDR_HI:[0-9]+]], s[[PC_HI]], private@rel32@hi+16 +; CHECK: s_add_u32 s[[ADDR_LO:[0-9]+]], s[[PC_LO]], .Lprivate@rel32@lo+8 +; CHECK: s_addc_u32 s[[ADDR_HI:[0-9]+]], s[[PC_HI]], .Lprivate@rel32@hi+16 ; CHECK: s_load_dword s{{[0-9]+}}, s[[[ADDR_LO]]:[[ADDR_HI]]] define amdgpu_kernel void @private_test(ptr addrspace(1) %out) { %ptr = getelementptr [256 x i32], ptr addrspace(1) @private, i32 0, i32 1 @@ -153,7 +153,7 @@ define amdgpu_kernel void @external_w_init_test(ptr addrspace(1) %out) { ret void } -; CHECK: .local private +; CHECK: .local .Lprivate ; CHECK: .local internal ; CHECK: .weak linkonce ; CHECK: .weak weak diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll index 049663a..f80d50b 100644 --- a/llvm/test/CodeGen/AMDGPU/idot4u.ll +++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll @@ -2730,18 +2730,15 @@ define amdgpu_kernel void @udot4_acc8_vecMul(ptr addrspace(1) %src1, ; GFX11-DL-TRUE16-NEXT: v_lshrrev_b16 v1.h, 8, v4.l ; GFX11-DL-TRUE16-NEXT: v_mad_u16 v0.l, v3.l, v4.l, v0.l ; GFX11-DL-TRUE16-NEXT: v_mul_lo_u16 v2.l, v2.l, v6.l -; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v6.l, 0 +; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-DL-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v1.l ; GFX11-DL-TRUE16-NEXT: v_mul_lo_u16 v0.h, v0.h, v1.h -; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | 
instid1(VALU_DEP_2) ; GFX11-DL-TRUE16-NEXT: v_lshlrev_b16 v2.l, 8, v2.l -; GFX11-DL-TRUE16-NEXT: v_mov_b16_e32 v7.h, v6.l -; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-DL-TRUE16-NEXT: v_lshlrev_b16 v7.l, 8, v0.h +; GFX11-DL-TRUE16-NEXT: v_lshlrev_b16 v6.l, 8, v0.h +; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-DL-TRUE16-NEXT: v_or_b16 v6.h, v1.l, v2.l -; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-DL-TRUE16-NEXT: v_or_b32_e32 v1, v7, v6 -; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v1 +; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v6 ; GFX11-DL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-DL-TRUE16-NEXT: v_add_nc_u16 v0.l, v0.l, v1.l ; GFX11-DL-TRUE16-NEXT: v_lshrrev_b32_e32 v1, 8, v2 diff --git a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll index 6b09424..eee232a 100644 --- a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll +++ b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll @@ -49,7 +49,6 @@ bb: ret void } -; FIXME: This generates "instid1(/* invalid instid value */)". define amdgpu_kernel void @f2(i32 %arg, i32 %arg1, i32 %arg2, i1 %arg3, i32 %arg4, i1 %arg5, ptr %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10, i1 %arg11) { ; GFX11-LABEL: f2: ; GFX11: ; %bb.0: ; %bb diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.exp.large.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.exp.large.mir index b07dec3..689d147 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.exp.large.mir +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.exp.large.mir @@ -6,1153 +6,1147 @@ define amdgpu_kernel void @largeInterleave() #0 { ret void } ; GCN-LABEL: largeInterleave: ; GCN: ; %bb.0: - ; GCN-NEXT: ; implicit-def: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 - ; GCN-NEXT: ; implicit-def: $vgpr0 - ; GCN-NEXT: ; implicit-def: $vgpr2 - ; GCN-NEXT: ; implicit-def: $vgpr1 - ; GCN-NEXT: ; implicit-def: $vgpr8 - ; GCN-NEXT: ; implicit-def: $vgpr94 - ; GCN-NEXT: ; implicit-def: $vgpr76_vgpr77_vgpr78_vgpr79 - ; GCN-NEXT: ; implicit-def: $vgpr106 - ; GCN-NEXT: ; implicit-def: $vgpr132 - ; GCN-NEXT: ; implicit-def: $vgpr133 - ; GCN-NEXT: ; implicit-def: $vgpr139 - ; GCN-NEXT: ; implicit-def: $vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127 - ; GCN-NEXT: ; iglp_opt mask(0x00000002) - ; GCN-NEXT: ; implicit-def: $sgpr0 + ; GCN-NEXT: ; implicit-def: $vgpr16 + ; GCN-NEXT: ; implicit-def: $vgpr25 ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) - ; GCN-NEXT: v_readfirstlane_b32 s7, v0 + ; GCN-NEXT: v_readfirstlane_b32 s17, v16 + ; GCN-NEXT: ; implicit-def: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 + ; GCN-NEXT: ; implicit-def: $vgpr17 + ; GCN-NEXT: ; implicit-def: $sgpr15 ; GCN-NEXT: ; implicit-def: $sgpr8_sgpr9_sgpr10_sgpr11 - ; GCN-NEXT: ; kill: killed $sgpr8_sgpr9_sgpr10_sgpr11 - ; GCN-NEXT: ; implicit-def: $sgpr5 - ; GCN-NEXT: s_nop 1 - ; GCN-NEXT: v_lshl_add_u32 v0, s7, 4, v2 - ; GCN-NEXT: v_mul_lo_u32 v0, v0, s6 - ; GCN-NEXT: v_add_lshl_u32 v92, v0, v1, 1 - ; GCN-NEXT: v_add_u32_e32 v93, s0, v92 - ; GCN-NEXT: buffer_load_dwordx4 v[0:3], v92, s[8:11], 0 offen sc0 sc1 + ; GCN-NEXT: s_lshl_b32 s18, s17, 7 + ; 
GCN-NEXT: ; implicit-def: $vgpr18 + ; GCN-NEXT: v_add_lshl_u32 v230, v18, s18, 1 + ; GCN-NEXT: v_lshl_add_u32 v25, s17, 4, v25 + ; GCN-NEXT: v_mul_lo_u32 v25, v25, s6 + ; GCN-NEXT: v_add_lshl_u32 v226, v25, v17, 1 + ; GCN-NEXT: v_add_u32_e32 v17, s15, v226 + ; GCN-NEXT: buffer_load_dwordx4 v[64:67], v226, s[8:11], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: buffer_load_dwordx4 v[4:7], v93, s[8:11], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx4 v[68:71], v17, s[8:11], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: s_lshl_b32 s0, s7, 7 - ; GCN-NEXT: v_add_lshl_u32 v95, v8, s0, 1 - ; GCN-NEXT: v_add_u32_e32 v8, 64, v93 - ; GCN-NEXT: ; kill: killed $vgpr8 + ; GCN-NEXT: v_add_u32_e32 v72, 64, v17 + ; GCN-NEXT: ; implicit-def: $vgpr213 + ; GCN-NEXT: ; implicit-def: $vgpr152_vgpr153_vgpr154_vgpr155 + ; GCN-NEXT: ; implicit-def: $vgpr246 + ; GCN-NEXT: v_add_u32_e32 v188, 0x80, v17 + ; GCN-NEXT: ; implicit-def: $vgpr156_vgpr157_vgpr158_vgpr159 + ; GCN-NEXT: ; implicit-def: $vgpr144_vgpr145_vgpr146_vgpr147 + ; GCN-NEXT: ; implicit-def: $vgpr19 + ; GCN-NEXT: ; implicit-def: $vgpr26 + ; GCN-NEXT: ; implicit-def: $vgpr27 + ; GCN-NEXT: v_add_u32_e32 v227, 0xc0, v17 + ; GCN-NEXT: v_add_u32_e32 v231, v19, v26 + ; GCN-NEXT: v_add_u32_e32 v232, v19, v27 ; GCN-NEXT: ; implicit-def: $sgpr0_sgpr1_sgpr2_sgpr3 - ; GCN-NEXT: ; kill: killed $vgpr92 - ; GCN-NEXT: ; implicit-def: $sgpr6 + ; GCN-NEXT: ; implicit-def: $vgpr28 + ; GCN-NEXT: ; implicit-def: $vgpr29 + ; GCN-NEXT: v_add_u32_e32 v233, v19, v28 + ; GCN-NEXT: v_add_u32_e32 v234, v19, v29 + ; GCN-NEXT: ; implicit-def: $vgpr140_vgpr141_vgpr142_vgpr143 + ; GCN-NEXT: ; implicit-def: $sgpr5 + ; GCN-NEXT: ; implicit-def: $sgpr7 + ; GCN-NEXT: ; implicit-def: $vgpr148_vgpr149_vgpr150_vgpr151 + ; GCN-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139 + ; GCN-NEXT: ; implicit-def: $vgpr132_vgpr133_vgpr134_vgpr135 + ; GCN-NEXT: ; implicit-def: $vgpr20 + ; GCN-NEXT: v_add_u32_e32 v18, s17, v20 + ; GCN-NEXT: v_and_b32_e32 v18, 0x1fffffff, v18 + ; GCN-NEXT: ; implicit-def: $sgpr16 + ; GCN-NEXT: v_mul_lo_u32 v18, v18, s16 + ; GCN-NEXT: ; implicit-def: $vgpr21 + ; GCN-NEXT: v_add_lshl_u32 v199, v21, v18, 1 + ; GCN-NEXT: ; implicit-def: $vgpr22 + ; GCN-NEXT: v_lshl_add_u32 v200, v22, 1, v199 + ; GCN-NEXT: ; implicit-def: $vgpr23 + ; GCN-NEXT: v_lshl_add_u32 v201, v23, 1, v200 + ; GCN-NEXT: ; implicit-def: $vgpr24 + ; GCN-NEXT: v_lshl_add_u32 v202, v24, 1, v201 + ; GCN-NEXT: ; implicit-def: $vgpr16 + ; GCN-NEXT: ; implicit-def: $vgpr18 + ; GCN-NEXT: ; implicit-def: $vgpr20 + ; GCN-NEXT: ; implicit-def: $vgpr24 + ; GCN-NEXT: v_add_u32_e32 v247, v19, v24 + ; GCN-NEXT: v_add_u32_e32 v248, v19, v16 + ; GCN-NEXT: v_add_u32_e32 v249, v19, v18 + ; GCN-NEXT: v_add_u32_e32 v250, v19, v20 + ; GCN-NEXT: ; implicit-def: $vgpr128_vgpr129_vgpr130_vgpr131 + ; GCN-NEXT: ; implicit-def: $sgpr14 + ; GCN-NEXT: ; implicit-def: $vgpr196 + ; GCN-NEXT: ; implicit-def: $sgpr12_sgpr13 + ; GCN-NEXT: ; implicit-def: $vgpr211 + ; GCN-NEXT: v_max_f32_e32 v212, v211, v211 + ; GCN-NEXT: ; implicit-def: $vgpr198 + ; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; GCN-NEXT: ; implicit-def: $vgpr32 + ; GCN-NEXT: ; implicit-def: $vgpr33 + ; GCN-NEXT: ; implicit-def: $vgpr34 + ; GCN-NEXT: v_add_u32_e32 v210, v19, v34 + ; GCN-NEXT: v_add_u32_e32 v206, v19, v33 + ; GCN-NEXT: v_add_u32_e32 v205, v19, v32 + ; GCN-NEXT: ; 
implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; GCN-NEXT: ; implicit-def: $vgpr21 + ; GCN-NEXT: ; implicit-def: $vgpr22 + ; GCN-NEXT: ; implicit-def: $vgpr23 + ; GCN-NEXT: ; implicit-def: $vgpr30 + ; GCN-NEXT: ; implicit-def: $vgpr31 + ; GCN-NEXT: v_add_u32_e32 v207, v19, v21 + ; GCN-NEXT: v_add_u32_e32 v208, v19, v22 + ; GCN-NEXT: v_add_u32_e32 v209, v19, v23 + ; GCN-NEXT: v_add_u32_e32 v203, v19, v30 + ; GCN-NEXT: v_add_u32_e32 v204, v19, v31 + ; GCN-NEXT: ; kill: killed $vgpr17 + ; GCN-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; GCN-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; GCN-NEXT: ; implicit-def: $vgpr197 + ; GCN-NEXT: ; iglp_opt mask(0x00000002) ; GCN-NEXT: buffer_wbl2 sc0 sc1 - ; GCN-NEXT: ds_write_b128 v95, v[0:3] + ; GCN-NEXT: ds_write_b128 v230, v[64:67] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b128 v95, v[4:7] offset:1024 + ; GCN-NEXT: ds_write_b128 v230, v[68:71] offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_load_dwordx4 v[64:67], v92, s[8:11], 0 offen offset:64 sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx4 v[160:163], v226, s[8:11], 0 offen offset:64 sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: buffer_load_dwordx4 v[68:71], v8, s[8:11], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx4 v[164:167], v72, s[8:11], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND - ; GCN-NEXT: ds_read_b128 v[72:75], v94 + ; GCN-NEXT: ds_read_b128 v[64:67], v213 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[80:83], v94 offset:512 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[64:65], v[152:153], 0 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[66:67], v[154:155], v[112:127] + ; GCN-NEXT: ds_read_b128 v[64:67], v213 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[84:87], v94 offset:1024 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[64:65], v[152:153], 0 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[66:67], v[154:155], v[96:111] + ; GCN-NEXT: ds_read_b128 v[64:67], v213 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[72:73], v[76:77], 0 - ; GCN-NEXT: ds_read_b128 v[88:91], v94 offset:1536 + ; GCN-NEXT: ds_read_b128 v[168:171], v213 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[74:75], v[78:79], v[48:63] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 + ; GCN-NEXT: ds_read_b128 v[172:175], v246 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[80:81], v[76:77], 0 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[84:85], v[76:77], 0 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[88:89], v[76:77], 0 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[82:83], v[78:79], v[32:47] - ; GCN-NEXT: ds_read_b128 v[80:83], v106 offset:512 + ; GCN-NEXT: ds_read_b128 v[176:179], v246 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[86:87], 
v[78:79], v[16:31] - ; GCN-NEXT: ds_read_b128 v[84:87], v106 offset:1024 + ; GCN-NEXT: ds_read_b128 v[180:183], v246 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[90:91], v[78:79], v[0:15] - ; GCN-NEXT: ; implicit-def: $vgpr76_vgpr77_vgpr78_vgpr79 - ; GCN-NEXT: ds_read_b128 v[88:91], v106 offset:1536 + ; GCN-NEXT: ds_read_b128 v[184:187], v246 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[64:65], v[152:153], 0 ; GCN-NEXT: buffer_wbl2 sc0 sc1 - ; GCN-NEXT: ds_write_b128 v95, v[64:67] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[72:73], v[76:77], v[48:63] - ; GCN-NEXT: v_add_u32_e32 v72, 0x80, v93 + ; GCN-NEXT: ds_write_b128 v230, v[160:163] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[66:67], v[154:155], v[80:95] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b128 v95, v[68:71] offset:1024 + ; GCN-NEXT: ds_write_b128 v230, v[164:167] offset:1024 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[168:169], v[152:153], 0 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[170:171], v[154:155], v[64:79] ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_load_dwordx4 v[64:67], v92, s[8:11], 0 offen offset:128 sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx4 v[152:155], v226, s[8:11], 0 offen offset:128 sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: buffer_load_dwordx4 v[68:71], v72, s[8:11], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx4 v[160:163], v188, s[8:11], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND - ; GCN-NEXT: ; kill: killed $vgpr72 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[74:75], v[78:79], v[48:63] - ; GCN-NEXT: ds_read_b128 v[72:75], v94 + ; GCN-NEXT: ds_read_b128 v[188:191], v213 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[80:81], v[76:77], v[32:47] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[84:85], v[76:77], v[16:31] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[88:89], v[76:77], v[0:15] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[82:83], v[78:79], v[32:47] - ; GCN-NEXT: ds_read_b128 v[80:83], v94 offset:512 + ; GCN-NEXT: ds_read_b128 v[192:195], v213 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[86:87], v[78:79], v[16:31] - ; GCN-NEXT: ds_read_b128 v[84:87], v94 offset:1024 + ; GCN-NEXT: ds_read_b128 v[164:167], v213 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[90:91], v[78:79], v[0:15] - ; GCN-NEXT: ; implicit-def: $vgpr76_vgpr77_vgpr78_vgpr79 - ; GCN-NEXT: ds_read_b128 v[88:91], v94 offset:1536 + ; GCN-NEXT: ds_read_b128 v[214:217], v213 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[72:73], v[76:77], v[48:63] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[74:75], v[78:79], v[48:63] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[172:173], v[156:157], v[112:127] + ; GCN-NEXT: ds_read_b128 v[218:221], v246 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: 
v_mfma_f32_32x32x8_f16 v[32:47], v[80:81], v[76:77], v[32:47] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[84:85], v[76:77], v[16:31] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[88:89], v[76:77], v[0:15] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[82:83], v[78:79], v[32:47] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[86:87], v[78:79], v[16:31] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[90:91], v[78:79], v[0:15] - ; GCN-NEXT: ; implicit-def: $vgpr76_vgpr77_vgpr78_vgpr79 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[72:73], v[76:77], v[48:63] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[74:75], v[78:79], v[48:63] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 offset:512 + ; GCN-NEXT: ds_read_b128 v[222:225], v246 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[72:73], v[76:77], v[32:47] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[74:75], v[78:79], v[32:47] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 offset:1024 + ; GCN-NEXT: ds_read_b128 v[168:171], v246 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[72:73], v[76:77], v[16:31] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[74:75], v[78:79], v[16:31] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 offset:1536 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[174:175], v[158:159], v[112:127] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[188:189], v[144:145], v[112:127] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[190:191], v[146:147], v[112:127] + ; GCN-NEXT: ds_read_b128 v[188:191], v246 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND ; GCN-NEXT: buffer_wbl2 sc0 sc1 - ; GCN-NEXT: ds_write_b128 v95, v[64:67] + ; GCN-NEXT: ds_write_b128 v230, v[152:155] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b128 v95, v[68:71] offset:1024 - ; GCN-NEXT: ; implicit-def: $vgpr64 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[72:73], v[76:77], v[0:15] - ; GCN-NEXT: v_add_u32_e32 v72, 0xc0, v93 - ; GCN-NEXT: ; implicit-def: $vgpr73 - ; GCN-NEXT: v_add_u32_e32 v76, v132, v64 + ; GCN-NEXT: ds_write_b128 v230, v[160:163] offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_load_dwordx4 v[64:67], v92, s[8:11], 0 offen offset:192 sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx4 v[152:155], v226, s[8:11], 0 offen offset:192 sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: buffer_load_dwordx4 v[68:71], v72, s[8:11], 0 offen sc0 sc1 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[184:185], v[156:157], v[64:79] + ; GCN-NEXT: buffer_load_dwordx4 v[226:229], v227, s[8:11], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ; kill: killed $vgpr72 - ; GCN-NEXT: v_add_u32_e32 v72, v132, v73 - ; GCN-NEXT: buffer_load_dwordx2 v[98:99], v76, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[160:161], v231, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: buffer_load_dwordx2 v[102:103], v72, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[162:163], v232, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[74:75], v[78:79], v[0:15] - ; GCN-NEXT: ; implicit-def: $vgpr74 - ; GCN-NEXT: v_add_u32_e32 v72, v132, v74 - 
; GCN-NEXT: ; implicit-def: $vgpr75 - ; GCN-NEXT: buffer_load_dwordx2 v[100:101], v72, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[172:173], v233, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_add_u32_e32 v72, v132, v75 - ; GCN-NEXT: buffer_load_dwordx2 v[104:105], v72, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[174:175], v234, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND - ; GCN-NEXT: ds_read_b128 v[72:75], v94 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[186:187], v[158:159], v[64:79] + ; GCN-NEXT: v_perm_b32 v238, v162, v160, s5 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[218:219], v[140:141], v[112:127] + ; GCN-NEXT: v_perm_b32 v240, v162, v160, s7 + ; GCN-NEXT: v_perm_b32 v242, v163, v161, s5 + ; GCN-NEXT: v_perm_b32 v244, v163, v161, s7 + ; GCN-NEXT: ds_read_b128 v[160:163], v213 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ; kill: killed $vgpr76 - ; GCN-NEXT: ; implicit-def: $vgpr76_vgpr77_vgpr78_vgpr79 - ; GCN-NEXT: ; implicit-def: $sgpr8 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[72:73], v[76:77], v[48:63] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[74:75], v[78:79], v[48:63] - ; GCN-NEXT: ds_read_b128 v[72:75], v94 offset:512 + ; GCN-NEXT: v_perm_b32 v239, v174, v172, s5 + ; GCN-NEXT: v_perm_b32 v241, v174, v172, s7 + ; GCN-NEXT: v_perm_b32 v243, v175, v173, s5 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[214:215], v[144:145], v[64:79] + ; GCN-NEXT: v_perm_b32 v245, v175, v173, s7 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[176:177], v[156:157], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[220:221], v[142:143], v[112:127] + ; GCN-NEXT: ds_read_b128 v[218:221], v213 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[72:73], v[76:77], v[32:47] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[74:75], v[78:79], v[32:47] - ; GCN-NEXT: ds_read_b128 v[72:75], v94 offset:1024 + ; GCN-NEXT: ds_read_b128 v[172:175], v213 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[72:73], v[76:77], v[16:31] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[74:75], v[78:79], v[16:31] - ; GCN-NEXT: ds_read_b128 v[72:75], v94 offset:1536 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[216:217], v[146:147], v[64:79] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[178:179], v[158:159], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[160:161], v[148:149], v[112:127] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[188:189], v[140:141], v[64:79] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[192:193], v[144:145], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[162:163], v[150:151], v[112:127] + ; GCN-NEXT: ds_read_b128 v[160:163], v213 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[72:73], v[76:77], v[0:15] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[74:75], v[78:79], v[0:15] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 + ; GCN-NEXT: ds_read_b128 v[184:187], v246 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ; implicit-def: $vgpr76_vgpr77_vgpr78_vgpr79 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[72:73], v[76:77], v[48:63] - ; GCN-NEXT: 
v_mfma_f32_32x32x8_f16 v[48:63], v[74:75], v[78:79], v[48:63] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 offset:512 + ; GCN-NEXT: ds_read_b128 v[214:217], v246 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[72:73], v[76:77], v[32:47] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[74:75], v[78:79], v[32:47] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 offset:1024 + ; GCN-NEXT: ds_read_b128 v[176:179], v246 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[72:73], v[76:77], v[16:31] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[74:75], v[78:79], v[16:31] - ; GCN-NEXT: ds_read_b128 v[72:75], v106 offset:1536 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[190:191], v[142:143], v[64:79] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[194:195], v[146:147], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[160:161], v[148:149], v[64:79] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[180:181], v[156:157], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[184:185], v[136:137], v[112:127] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[222:223], v[140:141], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[162:163], v[150:151], v[64:79] + ; GCN-NEXT: ds_read_b128 v[160:163], v246 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND ; GCN-NEXT: buffer_wbl2 sc0 sc1 - ; GCN-NEXT: ds_write_b128 v95, v[64:67] + ; GCN-NEXT: ds_write_b128 v230, v[152:155] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b128 v95, v[68:71] offset:1024 + ; GCN-NEXT: ds_write_b128 v230, v[226:229] offset:1024 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[182:183], v[158:159], v[80:95] ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_read_b128 v[64:67], v94 + ; GCN-NEXT: ds_read_b128 v[156:159], v213 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[90:93], v94 offset:512 + ; GCN-NEXT: ds_read_b128 v[226:229], v213 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[72:73], v[76:77], v[0:15] - ; GCN-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71 - ; GCN-NEXT: ds_read_b128 v[84:87], v94 offset:1024 + ; GCN-NEXT: ds_read_b128 v[180:183], v213 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[64:65], v[68:69], v[48:63] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[74:75], v[78:79], v[0:15] - ; GCN-NEXT: ds_read_b128 v[76:79], v94 offset:1536 + ; GCN-NEXT: ds_read_b128 v[152:155], v213 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[94:97], v106 + ; GCN-NEXT: ds_read_b128 v[230:233], v246 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[66:67], v[70:71], v[48:63] - ; GCN-NEXT: ; implicit-def: $vgpr64_vgpr65_vgpr66_vgpr67 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[90:91], v[68:69], v[32:47] - ; GCN-NEXT: ds_read_b128 v[88:91], v106 offset:512 + ; GCN-NEXT: ds_read_b128 v[234:237], v246 offset:512 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[80:83], v106 offset:1024 + 
; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[186:187], v[138:139], v[112:127] + ; GCN-NEXT: ds_read_b128 v[184:187], v246 offset:1024 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[72:75], v106 offset:1536 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[224:225], v[142:143], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[156:157], v[132:133], v[112:127] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[218:219], v[148:149], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[158:159], v[134:135], v[112:127] + ; GCN-NEXT: ds_read_b128 v[156:159], v246 offset:1536 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[94:95], v[64:65], v[48:63] - ; GCN-NEXT: v_perm_b32 v94, v102, v98, s5 - ; GCN-NEXT: v_perm_b32 v98, v102, v98, s8 - ; GCN-NEXT: v_perm_b32 v102, v103, v99, s5 - ; GCN-NEXT: v_perm_b32 v95, v104, v100, s5 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[92:93], v[70:71], v[32:47] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[84:85], v[68:69], v[16:31] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[96:97], v[66:67], v[48:63] - ; GCN-NEXT: v_perm_b32 v96, v103, v99, s8 - ; GCN-NEXT: v_perm_b32 v99, v104, v100, s8 - ; GCN-NEXT: v_perm_b32 v103, v105, v101, s5 - ; GCN-NEXT: v_perm_b32 v97, v105, v101, s8 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[88:89], v[64:65], v[32:47] - ; GCN-NEXT: s_nop 5 - ; GCN-NEXT: v_mul_f32_e32 v100, s4, v48 - ; GCN-NEXT: v_mul_f32_e32 v101, s4, v49 - ; GCN-NEXT: v_max3_f32 v92, v100, s6, v101 - ; GCN-NEXT: v_mul_f32_e32 v93, s4, v50 - ; GCN-NEXT: v_mul_f32_e32 v100, s4, v51 - ; GCN-NEXT: v_max3_f32 v92, v92, v93, v100 - ; GCN-NEXT: v_mul_f32_e32 v93, s4, v52 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[86:87], v[70:71], v[16:31] - ; GCN-NEXT: v_mul_f32_e32 v100, s4, v53 - ; GCN-NEXT: v_max3_f32 v92, v92, v93, v100 - ; GCN-NEXT: v_mul_f32_e32 v84, s4, v54 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v55 - ; GCN-NEXT: v_max3_f32 v84, v92, v84, v85 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v56 - ; GCN-NEXT: v_mul_f32_e32 v92, s4, v57 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[76:77], v[68:69], v[0:15] - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v92 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v58 - ; GCN-NEXT: v_mul_f32_e32 v88, s4, v59 - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v88 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v60 - ; GCN-NEXT: v_mul_f32_e32 v88, s4, v61 - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v88 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[90:91], v[66:67], v[32:47] - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v62 - ; GCN-NEXT: v_mul_f32_e32 v88, s4, v63 - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v88 - ; GCN-NEXT: ; implicit-def: $sgpr6 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[80:81], v[64:65], v[16:31] - ; GCN-NEXT: s_nop 6 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v32 - ; GCN-NEXT: v_mul_f32_e32 v88, s4, v33 - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v88 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v34 - ; GCN-NEXT: v_mul_f32_e32 v88, s4, v35 - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v88 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v36 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[78:79], v[70:71], v[0:15] - ; GCN-NEXT: v_mul_f32_e32 v86, s4, v37 - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v86 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v38 - ; GCN-NEXT: v_mul_f32_e32 v86, s4, v39 - ; GCN-NEXT: v_max3_f32 v84, v84, v85, v86 - ; GCN-NEXT: v_mul_f32_e32 v85, s4, v40 - ; GCN-NEXT: 
v_mul_f32_e32 v80, s4, v41 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[82:83], v[66:67], v[16:31] - ; GCN-NEXT: v_max3_f32 v80, v84, v85, v80 - ; GCN-NEXT: v_mul_f32_e32 v81, s4, v42 - ; GCN-NEXT: v_mul_f32_e32 v84, s4, v43 - ; GCN-NEXT: v_max3_f32 v80, v80, v81, v84 - ; GCN-NEXT: v_mul_f32_e32 v81, s4, v44 - ; GCN-NEXT: v_mul_f32_e32 v84, s4, v45 - ; GCN-NEXT: v_max3_f32 v80, v80, v81, v84 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[72:73], v[64:65], v[0:15] - ; GCN-NEXT: v_mul_f32_e32 v81, s4, v46 - ; GCN-NEXT: v_mul_f32_e32 v82, s4, v47 - ; GCN-NEXT: v_max3_f32 v80, v80, v81, v82 - ; GCN-NEXT: v_mul_f32_e32 v81, s4, v16 - ; GCN-NEXT: v_mul_f32_e32 v82, s4, v17 - ; GCN-NEXT: v_max3_f32 v80, v80, v81, v82 - ; GCN-NEXT: v_mul_f32_e32 v68, s4, v18 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[74:75], v[66:67], v[0:15] - ; GCN-NEXT: v_mul_f32_e32 v69, s4, v19 - ; GCN-NEXT: v_max3_f32 v68, v80, v68, v69 - ; GCN-NEXT: v_mul_f32_e32 v69, s4, v20 - ; GCN-NEXT: v_mul_f32_e32 v76, s4, v21 - ; GCN-NEXT: v_max3_f32 v68, v68, v69, v76 - ; GCN-NEXT: v_mul_f32_e32 v69, s4, v22 - ; GCN-NEXT: v_mul_f32_e32 v70, s4, v23 - ; GCN-NEXT: v_max3_f32 v68, v68, v69, v70 - ; GCN-NEXT: v_mul_f32_e32 v69, s4, v24 - ; GCN-NEXT: v_mul_f32_e32 v70, s4, v25 - ; GCN-NEXT: v_max3_f32 v68, v68, v69, v70 - ; GCN-NEXT: v_mul_f32_e32 v69, s4, v26 - ; GCN-NEXT: v_mul_f32_e32 v70, s4, v27 - ; GCN-NEXT: v_max3_f32 v64, v68, v69, v70 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v28 - ; GCN-NEXT: v_mul_f32_e32 v68, s4, v29 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v68 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v30 - ; GCN-NEXT: v_mul_f32_e32 v68, s4, v31 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v68 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v0 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v1 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v2 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v3 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v4 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v5 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v6 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v7 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v8 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v9 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v10 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v11 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v12 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v13 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: v_mul_f32_e32 v65, s4, v14 - ; GCN-NEXT: v_mul_f32_e32 v66, s4, v15 - ; GCN-NEXT: v_max3_f32 v64, v64, v65, v66 - ; GCN-NEXT: ; implicit-def: $vgpr65 - ; GCN-NEXT: ; implicit-def: $vgpr66 - ; GCN-NEXT: ; implicit-def: $vgpr68 - ; GCN-NEXT: ; implicit-def: $vgpr67 - ; GCN-NEXT: v_add_u32_e32 v65, s7, v65 - ; GCN-NEXT: v_and_b32_e32 v65, 0x1fffffff, v65 - ; GCN-NEXT: v_mul_lo_u32 v65, v65, s6 - ; GCN-NEXT: v_add_lshl_u32 v135, v66, v65, 1 - ; GCN-NEXT: ds_bpermute_b32 v65, v133, v64 - ; GCN-NEXT: ; implicit-def: $vgpr66 - ; GCN-NEXT: v_lshl_add_u32 v136, v66, 1, v135 - ; GCN-NEXT: ; implicit-def: $vgpr66 - ; GCN-NEXT: v_lshl_add_u32 v137, v66, 1, v136 - ; GCN-NEXT: ; implicit-def: $vgpr66 - ; GCN-NEXT: ; implicit-def: $sgpr6_sgpr7 - ; GCN-NEXT: v_lshl_add_u32 v138, v66, 1, v137 ; GCN-NEXT: buffer_wbl2 sc0 sc1 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b64 v135, v[94:95] - ; GCN-NEXT: v_max_f32_e32 v65, v65, v65 - ; GCN-NEXT: v_max_f32_e32 v64, v64, v65 - ; 
GCN-NEXT: ds_bpermute_b32 v65, v133, v64 + ; GCN-NEXT: ds_write_b64 v199, v[238:239] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b64 v136, v[98:99] + ; GCN-NEXT: ds_write_b64 v200, v[240:241] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b64 v137, v[102:103] + ; GCN-NEXT: ds_write_b64 v201, v[242:243] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b64 v138, v[96:97] - ; GCN-NEXT: v_add_u32_e32 v68, v132, v68 - ; GCN-NEXT: v_cndmask_b32_e64 v64, v65, v64, s[6:7] - ; GCN-NEXT: v_max_f32_e32 v64, v64, v64 - ; GCN-NEXT: ; implicit-def: $vgpr65 - ; GCN-NEXT: v_max_f32_e32 v66, v65, v65 - ; GCN-NEXT: v_max_f32_e32 v134, v66, v64 - ; GCN-NEXT: ; implicit-def: $vgpr64 + ; GCN-NEXT: ds_write_b64 v202, v[244:245] ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_load_dwordx2 v[156:157], v68, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[192:193], v247, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_add_u32_e32 v64, v132, v64 - ; GCN-NEXT: buffer_load_dwordx2 v[158:159], v64, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[220:221], v[150:151], v[96:111] + ; GCN-NEXT: buffer_load_dwordx2 v[194:195], v248, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ; implicit-def: $vgpr66 - ; GCN-NEXT: v_add_u32_e32 v64, v132, v66 - ; GCN-NEXT: buffer_load_dwordx2 v[128:129], v64, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[218:219], v249, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_add_u32_e32 v64, v132, v67 - ; GCN-NEXT: buffer_load_dwordx2 v[130:131], v64, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[220:221], v250, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_fma_f32 v57, s4, v57, -v134 - ; GCN-NEXT: v_fma_f32 v48, s4, v48, -v134 - ; GCN-NEXT: v_fma_f32 v96, s4, v58, -v134 - ; GCN-NEXT: v_mul_f32_e32 v57, 0x3fb8aa3b, v57 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v48 - ; GCN-NEXT: v_fma_f32 v64, s4, v49, -v134 - ; GCN-NEXT: v_exp_f32_e32 v163, v57 - ; GCN-NEXT: v_mul_f32_e32 v57, 0x3fb8aa3b, v96 - ; GCN-NEXT: v_fma_f32 v66, s4, v50, -v134 - ; GCN-NEXT: v_exp_f32_e32 v164, v57 - ; GCN-NEXT: v_exp_f32_e32 v49, v48 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v64 - ; GCN-NEXT: v_fma_f32 v67, s4, v51, -v134 - ; GCN-NEXT: v_exp_f32_e32 v50, v48 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v66 - ; GCN-NEXT: v_fma_f32 v68, s4, v52, -v134 - ; GCN-NEXT: v_exp_f32_e32 v51, v48 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v67 - ; GCN-NEXT: v_fma_f32 v69, s4, v53, -v134 - ; GCN-NEXT: v_exp_f32_e32 v52, v48 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v68 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND - ; GCN-NEXT: v_fma_f32 v70, s4, v54, -v134 - ; GCN-NEXT: v_exp_f32_e32 v53, v48 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v69 - ; GCN-NEXT: v_fma_f32 v71, s4, v55, -v134 - ; GCN-NEXT: ds_read_b128 v[140:143], v139 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_exp_f32_e32 v54, v48 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v70 - ; GCN-NEXT: v_exp_f32_e32 v55, v48 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v71 - ; GCN-NEXT: ds_read_b128 v[144:147], v139 offset:576 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; 
GCN-NEXT: v_fma_f32 v66, s4, v56, -v134 - ; GCN-NEXT: v_exp_f32_e32 v56, v48 - ; GCN-NEXT: v_sub_f32_e32 v48, v65, v134 - ; GCN-NEXT: v_cvt_f16_f32_e32 v64, v49 - ; GCN-NEXT: v_cvt_f16_f32_e32 v67, v50 - ; GCN-NEXT: v_cvt_f16_f32_e32 v68, v51 - ; GCN-NEXT: v_cvt_f16_f32_e32 v58, v52 - ; GCN-NEXT: v_mul_f32_e32 v48, 0x3fb8aa3b, v48 - ; GCN-NEXT: ds_read_b128 v[148:151], v139 offset:1152 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_exp_f32_e32 v48, v48 - ; GCN-NEXT: v_pack_b32_f16 v161, v68, v58 - ; GCN-NEXT: v_pack_b32_f16 v160, v64, v67 - ; GCN-NEXT: v_mul_f32_e32 v58, 0x3fb8aa3b, v66 - ; GCN-NEXT: ; implicit-def: $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79 - ; GCN-NEXT: ds_read_b128 v[152:155], v139 offset:1728 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_fma_f32 v162, s4, v61, -v134 - ; GCN-NEXT: v_cvt_f16_f32_e32 v61, v55 - ; GCN-NEXT: v_cvt_f16_f32_e32 v57, v56 - ; GCN-NEXT: v_pk_mul_f32 v[64:65], v[64:65], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[66:67], v[66:67], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[68:69], v[68:69], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[70:71], v[70:71], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[72:73], v[72:73], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[74:75], v[74:75], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[76:77], v[76:77], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[78:79], v[78:79], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: ; implicit-def: $vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95 - ; GCN-NEXT: v_fma_f32 v59, s4, v59, -v134 - ; GCN-NEXT: v_pk_mul_f32 v[80:81], v[80:81], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[140:141], v[160:161], v[64:79] - ; GCN-NEXT: v_mul_f32_e64 v82, v82, v48 - ; GCN-NEXT: v_mul_f32_e64 v83, v83, v48 - ; GCN-NEXT: v_mul_f32_e64 v84, v84, v48 - ; GCN-NEXT: v_mul_f32_e64 v85, v85, v48 - ; GCN-NEXT: v_mul_f32_e64 v86, v86, v48 - ; GCN-NEXT: v_mul_f32_e64 v87, v87, v48 - ; GCN-NEXT: v_pk_mul_f32 v[88:89], v[88:89], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[90:91], v[90:91], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[92:93], v[92:93], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[94:95], v[94:95], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: ; implicit-def: $vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111 - ; GCN-NEXT: v_exp_f32_e32 v58, v58 - ; GCN-NEXT: v_pk_mul_f32 v[96:97], v[96:97], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[144:145], v[160:161], v[80:95] - ; GCN-NEXT: v_mul_f32_e64 v98, v98, v48 - ; GCN-NEXT: v_mul_f32_e64 v99, v99, v48 - ; GCN-NEXT: v_mul_f32_e64 v100, v100, v48 - ; GCN-NEXT: v_mul_f32_e64 v101, v101, v48 - ; GCN-NEXT: v_mul_f32_e64 v102, v102, v48 - ; GCN-NEXT: v_mul_f32_e64 v103, v103, v48 - ; GCN-NEXT: v_pk_mul_f32 v[104:105], v[104:105], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[106:107], v[106:107], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[108:109], v[108:109], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[110:111], v[110:111], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pack_b32_f16 v145, v61, v57 - ; GCN-NEXT: v_mul_f32_e32 v57, 0x3fb8aa3b, v59 - ; GCN-NEXT: v_cvt_f16_f32_e32 v140, v53 - ; GCN-NEXT: v_cvt_f16_f32_e32 
v141, v54 - ; GCN-NEXT: v_exp_f32_e32 v59, v57 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[148:149], v[160:161], v[96:111] - ; GCN-NEXT: v_fma_f32 v60, s4, v60, -v134 - ; GCN-NEXT: v_mul_f32_e64 v112, v112, v48 - ; GCN-NEXT: v_mul_f32_e64 v113, v113, v48 - ; GCN-NEXT: v_mul_f32_e64 v114, v114, v48 - ; GCN-NEXT: v_mul_f32_e64 v115, v115, v48 - ; GCN-NEXT: v_pk_mul_f32 v[116:117], v[116:117], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[118:119], v[118:119], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[120:121], v[120:121], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[122:123], v[122:123], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[124:125], v[124:125], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_pk_mul_f32 v[126:127], v[126:127], v[48:49] op_sel_hi:[1,0] - ; GCN-NEXT: v_fma_f32 v148, s4, v62, -v134 - ; GCN-NEXT: v_pack_b32_f16 v144, v140, v141 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[152:153], v[160:161], v[112:127] - ; GCN-NEXT: v_fma_f32 v152, s4, v63, -v134 - ; GCN-NEXT: v_mul_f32_e32 v149, 0x3fb8aa3b, v60 - ; GCN-NEXT: ; implicit-def: $vgpr57 - ; GCN-NEXT: ds_read_b128 v[60:63], v57 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_exp_f32_e32 v160, v149 - ; GCN-NEXT: v_fma_f32 v161, s4, v33, -v134 - ; GCN-NEXT: v_mul_f32_e32 v33, 0x3fb8aa3b, v148 - ; GCN-NEXT: v_cvt_f16_f32_e32 v153, v58 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[142:143], v[144:145], v[64:79] - ; GCN-NEXT: v_fma_f32 v32, s4, v32, -v134 - ; GCN-NEXT: ds_read_b128 v[140:143], v57 offset:576 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_fma_f32 v40, s4, v40, -v134 - ; GCN-NEXT: v_fma_f32 v44, s4, v44, -v134 - ; GCN-NEXT: v_fma_f32 v16, s4, v16, -v134 - ; GCN-NEXT: v_fma_f32 v166, s4, v20, -v134 - ; GCN-NEXT: v_fma_f32 v24, s4, v24, -v134 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[146:147], v[144:145], v[80:95] - ; GCN-NEXT: v_mul_f32_e32 v146, 0x3fb8aa3b, v162 - ; GCN-NEXT: v_cvt_f16_f32_e32 v147, v163 - ; GCN-NEXT: v_exp_f32_e32 v162, v146 - ; GCN-NEXT: v_cvt_f16_f32_e32 v146, v164 - ; GCN-NEXT: v_fma_f32 v28, s4, v28, -v134 - ; GCN-NEXT: v_pack_b32_f16 v148, v153, v147 - ; GCN-NEXT: v_fma_f32 v0, s4, v0, -v134 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[150:151], v[144:145], v[96:111] - ; GCN-NEXT: v_exp_f32_e32 v151, v33 - ; GCN-NEXT: v_cvt_f16_f32_e32 v33, v59 - ; GCN-NEXT: v_fma_f32 v150, s4, v34, -v134 - ; GCN-NEXT: v_fma_f32 v8, s4, v8, -v134 - ; GCN-NEXT: v_fma_f32 v12, s4, v12, -v134 - ; GCN-NEXT: v_pack_b32_f16 v149, v146, v33 - ; GCN-NEXT: v_mul_f32_e32 v33, 0x3fb8aa3b, v152 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[154:155], v[144:145], v[112:127] - ; GCN-NEXT: v_fma_f32 v152, s4, v35, -v134 - ; GCN-NEXT: v_exp_f32_e32 v153, v33 - ; GCN-NEXT: v_fma_f32 v155, s4, v36, -v134 - ; GCN-NEXT: v_perm_b32 v36, v158, v156, s5 - ; GCN-NEXT: v_cvt_f16_f32_e32 v154, v160 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[60:61], v[148:149], v[64:79] - ; GCN-NEXT: v_mul_f32_e32 v60, 0x3fb8aa3b, v32 - ; GCN-NEXT: ds_read_b128 v[32:35], v57 offset:1152 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[144:147], v57 offset:1728 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mul_f32_e32 v61, 0x3fb8aa3b, v161 - ; GCN-NEXT: v_exp_f32_e32 v165, v60 - ; GCN-NEXT: v_perm_b32 v60, v158, v156, s8 - ; GCN-NEXT: v_fma_f32 v158, s4, v37, -v134 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[140:141], 
v[148:149], v[80:95] - ; GCN-NEXT: v_exp_f32_e32 v161, v61 - ; GCN-NEXT: v_perm_b32 v140, v159, v157, s8 - ; GCN-NEXT: v_perm_b32 v37, v130, v128, s5 - ; GCN-NEXT: v_perm_b32 v61, v130, v128, s8 - ; GCN-NEXT: v_perm_b32 v141, v131, v129, s8 + ; GCN-NEXT: v_perm_b32 v188, v194, v192, s5 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[164:165], v[144:145], v[80:95] + ; GCN-NEXT: v_perm_b32 v189, v220, v218, s5 + ; GCN-NEXT: v_perm_b32 v191, v220, v218, s7 + ; GCN-NEXT: v_perm_b32 v190, v194, v192, s7 + ; GCN-NEXT: v_perm_b32 v192, v195, v193, s5 + ; GCN-NEXT: v_perm_b32 v194, v195, v193, s7 + ; GCN-NEXT: v_perm_b32 v193, v221, v219, s5 + ; GCN-NEXT: v_perm_b32 v195, v221, v219, s7 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[166:167], v[146:147], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[168:169], v[140:141], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[170:171], v[142:143], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[172:173], v[148:149], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[214:215], v[136:137], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[174:175], v[150:151], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[216:217], v[138:139], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[176:177], v[136:137], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[226:227], v[132:133], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[178:179], v[138:139], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[160:161], v[136:137], v[64:79] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[230:231], v[128:129], v[112:127] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[228:229], v[134:135], v[96:111] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[180:181], v[132:133], v[80:95] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[162:163], v[138:139], v[64:79] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[232:233], v[130:131], v[112:127] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[234:235], v[128:129], v[96:111] + ; GCN-NEXT: s_nop 9 + ; GCN-NEXT: v_mul_f32_e32 v213, s4, v112 + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v113 + ; GCN-NEXT: v_max3_f32 v213, v213, s14, v218 + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v114 + ; GCN-NEXT: v_mul_f32_e32 v219, s4, v115 + ; GCN-NEXT: v_max3_f32 v213, v213, v218, v219 + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v116 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[182:183], v[134:135], v[80:95] + ; GCN-NEXT: v_mul_f32_e32 v219, s4, v117 + ; GCN-NEXT: v_max3_f32 v213, v213, v218, v219 + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v118 + ; GCN-NEXT: v_mul_f32_e32 v219, s4, v119 + ; GCN-NEXT: v_max3_f32 v213, v213, v218, v219 + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v120 + ; GCN-NEXT: v_mul_f32_e32 v219, s4, v121 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[152:153], v[132:133], v[64:79] + ; GCN-NEXT: v_max3_f32 v213, v213, v218, v219 + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v122 + ; GCN-NEXT: v_mul_f32_e32 v219, s4, v123 + ; GCN-NEXT: v_max3_f32 v213, v213, v218, v219 + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v124 + ; GCN-NEXT: v_mul_f32_e32 v219, s4, v125 + ; GCN-NEXT: v_max3_f32 v213, v213, v218, v219 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[236:237], v[130:131], v[96:111] + ; GCN-NEXT: v_mul_f32_e32 v218, s4, v126 + ; GCN-NEXT: v_mul_f32_e32 v219, s4, v127 + ; GCN-NEXT: v_max3_f32 v213, v213, v218, v219 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[184:185], v[128:129], v[80:95] + ; GCN-NEXT: s_nop 6 + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v96 + ; 
GCN-NEXT: v_mul_f32_e32 v215, s4, v97 + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v98 + ; GCN-NEXT: v_mul_f32_e32 v215, s4, v99 + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v100 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[154:155], v[134:135], v[64:79] + ; GCN-NEXT: v_mul_f32_e32 v215, s4, v101 + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v102 + ; GCN-NEXT: v_mul_f32_e32 v215, s4, v103 + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v104 + ; GCN-NEXT: v_mul_f32_e32 v215, s4, v105 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[186:187], v[130:131], v[80:95] + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v106 + ; GCN-NEXT: v_mul_f32_e32 v215, s4, v107 + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v108 + ; GCN-NEXT: v_mul_f32_e32 v215, s4, v109 + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[156:157], v[128:129], v[64:79] + ; GCN-NEXT: v_mul_f32_e32 v214, s4, v110 + ; GCN-NEXT: v_mul_f32_e32 v215, s4, v111 + ; GCN-NEXT: v_max3_f32 v213, v213, v214, v215 + ; GCN-NEXT: v_mul_f32_e32 v140, s4, v80 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v81 + ; GCN-NEXT: v_max3_f32 v140, v213, v140, v141 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v82 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[158:159], v[130:131], v[64:79] + ; GCN-NEXT: v_mul_f32_e32 v142, s4, v83 + ; GCN-NEXT: v_max3_f32 v140, v140, v141, v142 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v84 + ; GCN-NEXT: v_mul_f32_e32 v142, s4, v85 + ; GCN-NEXT: v_max3_f32 v140, v140, v141, v142 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v86 + ; GCN-NEXT: v_mul_f32_e32 v142, s4, v87 + ; GCN-NEXT: v_max3_f32 v140, v140, v141, v142 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v88 + ; GCN-NEXT: v_mul_f32_e32 v142, s4, v89 + ; GCN-NEXT: v_max3_f32 v140, v140, v141, v142 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v90 + ; GCN-NEXT: v_mul_f32_e32 v142, s4, v91 + ; GCN-NEXT: v_max3_f32 v140, v140, v141, v142 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v92 + ; GCN-NEXT: v_mul_f32_e32 v142, s4, v93 + ; GCN-NEXT: v_max3_f32 v140, v140, v141, v142 + ; GCN-NEXT: v_mul_f32_e32 v141, s4, v94 + ; GCN-NEXT: v_mul_f32_e32 v142, s4, v95 + ; GCN-NEXT: v_max3_f32 v140, v140, v141, v142 + ; GCN-NEXT: v_mul_f32_e32 v128, s4, v64 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v65 + ; GCN-NEXT: v_max3_f32 v128, v140, v128, v129 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v66 + ; GCN-NEXT: v_mul_f32_e32 v130, s4, v67 + ; GCN-NEXT: v_max3_f32 v128, v128, v129, v130 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v68 + ; GCN-NEXT: v_mul_f32_e32 v130, s4, v69 + ; GCN-NEXT: v_max3_f32 v128, v128, v129, v130 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v70 + ; GCN-NEXT: v_mul_f32_e32 v130, s4, v71 + ; GCN-NEXT: v_max3_f32 v128, v128, v129, v130 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v72 + ; GCN-NEXT: v_mul_f32_e32 v130, s4, v73 + ; GCN-NEXT: v_max3_f32 v128, v128, v129, v130 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v74 + ; GCN-NEXT: v_mul_f32_e32 v130, s4, v75 + ; GCN-NEXT: v_max3_f32 v128, v128, v129, v130 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v76 + ; GCN-NEXT: v_mul_f32_e32 v130, s4, v77 + ; GCN-NEXT: v_max3_f32 v128, v128, v129, v130 + ; GCN-NEXT: v_mul_f32_e32 v129, s4, v78 + ; GCN-NEXT: v_mul_f32_e32 v130, s4, v79 + ; GCN-NEXT: v_max3_f32 v128, v128, v129, v130 + ; GCN-NEXT: ds_bpermute_b32 v129, v196, v128 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: 
ds_read_b128 v[130:133], v198 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: ds_read_b128 v[134:137], v198 offset:576 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_max_f32_e32 v129, v129, v129 + ; GCN-NEXT: v_max_f32_e32 v128, v128, v129 + ; GCN-NEXT: ds_bpermute_b32 v129, v196, v128 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: v_cndmask_b32_e64 v128, v129, v128, s[12:13] + ; GCN-NEXT: v_max_f32_e32 v128, v128, v128 + ; GCN-NEXT: v_max_f32_e32 v128, v212, v128 + ; GCN-NEXT: v_fma_f32 v113, s4, v113, -v128 + ; GCN-NEXT: v_mul_f32_e32 v138, 0x3fb8aa3b, v113 + ; GCN-NEXT: v_fma_f32 v113, s4, v114, -v128 + ; GCN-NEXT: v_mul_f32_e32 v139, 0x3fb8aa3b, v113 + ; GCN-NEXT: v_fma_f32 v113, s4, v115, -v128 + ; GCN-NEXT: v_mul_f32_e32 v140, 0x3fb8aa3b, v113 + ; GCN-NEXT: v_fma_f32 v113, s4, v116, -v128 + ; GCN-NEXT: v_mul_f32_e32 v141, 0x3fb8aa3b, v113 + ; GCN-NEXT: v_fma_f32 v113, s4, v117, -v128 + ; GCN-NEXT: v_mul_f32_e32 v142, 0x3fb8aa3b, v113 + ; GCN-NEXT: v_fma_f32 v113, s4, v118, -v128 + ; GCN-NEXT: v_fma_f32 v112, s4, v112, -v128 + ; GCN-NEXT: v_mul_f32_e32 v143, 0x3fb8aa3b, v113 + ; GCN-NEXT: v_fma_f32 v113, s4, v119, -v128 + ; GCN-NEXT: v_fma_f32 v118, s4, v120, -v128 + ; GCN-NEXT: v_fma_f32 v120, s4, v121, -v128 + ; GCN-NEXT: v_mul_f32_e32 v112, 0x3fb8aa3b, v112 + ; GCN-NEXT: v_mul_f32_e32 v144, 0x3fb8aa3b, v113 + ; GCN-NEXT: v_mul_f32_e32 v149, 0x3fb8aa3b, v120 + ; GCN-NEXT: v_fma_f32 v120, s4, v122, -v128 + ; GCN-NEXT: v_exp_f32_e32 v114, v138 + ; GCN-NEXT: v_exp_f32_e32 v115, v139 + ; GCN-NEXT: v_exp_f32_e32 v116, v140 + ; GCN-NEXT: v_exp_f32_e32 v117, v141 + ; GCN-NEXT: v_mul_f32_e32 v148, 0x3fb8aa3b, v118 + ; GCN-NEXT: v_exp_f32_e32 v118, v142 + ; GCN-NEXT: v_mul_f32_e32 v150, 0x3fb8aa3b, v120 + ; GCN-NEXT: v_exp_f32_e32 v120, v144 + ; GCN-NEXT: v_exp_f32_e32 v113, v112 + ; GCN-NEXT: v_cvt_f16_f32_e32 v119, v114 + ; GCN-NEXT: v_cvt_f16_f32_e32 v121, v116 + ; GCN-NEXT: v_sub_f32_e32 v129, v211, v128 + ; GCN-NEXT: v_cvt_f16_f32_e32 v112, v113 + ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v129 + ; GCN-NEXT: ds_read_b128 v[138:141], v198 offset:1152 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_fma_f32 v122, s4, v123, -v128 + ; GCN-NEXT: v_pack_b32_f16 v146, v112, v119 + ; GCN-NEXT: v_cvt_f16_f32_e32 v112, v115 + ; GCN-NEXT: v_mul_f32_e32 v151, 0x3fb8aa3b, v122 + ; GCN-NEXT: v_cvt_f16_f32_e32 v123, v117 + ; GCN-NEXT: v_fma_f32 v122, s4, v124, -v128 + ; GCN-NEXT: v_pack_b32_f16 v147, v112, v121 + ; GCN-NEXT: v_exp_f32_e32 v112, v129 + ; GCN-NEXT: v_cvt_f16_f32_e32 v124, v118 + ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v122 + ; GCN-NEXT: v_fma_f32 v125, s4, v125, -v128 + ; GCN-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[32:33], v[32:33], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[34:35], v[34:35], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[130:131], v[146:147], v[0:15] + ; GCN-NEXT: 
v_exp_f32_e32 v119, v143 + ; GCN-NEXT: ds_read_b128 v[142:145], v198 offset:1728 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_pk_mul_f32 v[36:37], v[36:37], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[38:39], v[38:39], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[40:41], v[40:41], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[42:43], v[42:43], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[44:45], v[44:45], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[46:47], v[46:47], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[134:135], v[146:147], v[32:47] + ; GCN-NEXT: v_mul_f32_e64 v20, v20, v112 + ; GCN-NEXT: v_mul_f32_e64 v21, v21, v112 + ; GCN-NEXT: v_mul_f32_e64 v22, v22, v112 + ; GCN-NEXT: v_mul_f32_e64 v23, v23, v112 + ; GCN-NEXT: v_mul_f32_e64 v24, v24, v112 + ; GCN-NEXT: v_mul_f32_e64 v25, v25, v112 + ; GCN-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[48:49], v[48:49], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[50:51], v[50:51], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[52:53], v[52:53], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[54:55], v[54:55], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[56:57], v[56:57], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[58:59], v[58:59], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[60:61], v[60:61], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pk_mul_f32 v[62:63], v[62:63], v[112:113] op_sel_hi:[1,0] + ; GCN-NEXT: v_pack_b32_f16 v134, v123, v124 + ; GCN-NEXT: v_cvt_f16_f32_e32 v130, v119 + ; GCN-NEXT: v_fma_f32 v124, s4, v126, -v128 + ; GCN-NEXT: v_cvt_f16_f32_e32 v126, v120 + ; GCN-NEXT: v_exp_f32_e32 v121, v148 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[138:139], v[146:147], v[16:31] + ; GCN-NEXT: v_exp_f32_e32 v122, v149 + ; GCN-NEXT: v_pack_b32_f16 v135, v130, v126 + ; GCN-NEXT: v_mul_f32_e32 v138, 0x3fb8aa3b, v124 + ; GCN-NEXT: v_cvt_f16_f32_e32 v126, v121 + ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v125 + ; GCN-NEXT: v_fma_f32 v139, s4, v96, -v128 + ; GCN-NEXT: v_fma_f32 v127, s4, v127, -v128 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[142:143], v[146:147], v[48:63] + ; GCN-NEXT: v_exp_f32_e32 v123, v150 + ; GCN-NEXT: v_mul_f32_e32 v127, 0x3fb8aa3b, v127 + ; GCN-NEXT: v_fma_f32 v143, s4, v101, -v128 + ; GCN-NEXT: v_fma_f32 v64, s4, v64, -v128 + ; GCN-NEXT: v_fma_f32 v65, s4, v65, -v128 + ; GCN-NEXT: v_fma_f32 v68, s4, v68, -v128 + ; GCN-NEXT: v_fma_f32 v69, s4, v69, -v128 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[132:133], v[134:135], v[0:15] + ; GCN-NEXT: v_exp_f32_e32 v124, v151 + ; GCN-NEXT: ds_read_b128 v[130:133], v197 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: ds_read_b128 v[146:149], v197 offset:576 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[136:137], v[134:135], v[32:47] + ; GCN-NEXT: v_cvt_f16_f32_e32 v136, v122 + ; GCN-NEXT: v_exp_f32_e32 v96, v129 + ; GCN-NEXT: v_fma_f32 v137, s4, v97, -v128 + ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v139 + ; GCN-NEXT: v_pack_b32_f16 v126, v126, 
v136 + ; GCN-NEXT: v_cvt_f16_f32_e32 v136, v123 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[140:141], v[134:135], v[16:31] + ; GCN-NEXT: v_exp_f32_e32 v97, v125 + ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v137 + ; GCN-NEXT: v_fma_f32 v137, s4, v98, -v128 + ; GCN-NEXT: v_mul_f32_e32 v142, 0x3fb8aa3b, v137 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[144:145], v[134:135], v[48:63] + ; GCN-NEXT: v_cvt_f16_f32_e32 v134, v124 + ; GCN-NEXT: v_fma_f32 v135, s4, v99, -v128 + ; GCN-NEXT: v_exp_f32_e32 v98, v138 + ; GCN-NEXT: v_exp_f32_e32 v99, v127 + ; GCN-NEXT: v_mul_f32_e32 v150, 0x3fb8aa3b, v135 + ; GCN-NEXT: v_pack_b32_f16 v127, v136, v134 + ; GCN-NEXT: ds_read_b128 v[134:137], v197 offset:1152 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: ds_read_b128 v[138:141], v197 offset:1728 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[130:131], v[126:127], v[0:15] + ; GCN-NEXT: v_fma_f32 v131, s4, v100, -v128 + ; GCN-NEXT: v_cvt_f16_f32_e32 v130, v96 + ; GCN-NEXT: v_exp_f32_e32 v100, v129 + ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v131 + ; GCN-NEXT: v_cvt_f16_f32_e32 v131, v97 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND ; GCN-NEXT: buffer_wbl2 sc0 sc1 - ; GCN-NEXT: ds_write_b64 v135, v[36:37] - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[32:33], v[148:149], v[96:111] - ; GCN-NEXT: v_perm_b32 v32, v159, v157, s5 - ; GCN-NEXT: v_mul_f32_e32 v33, 0x3fb8aa3b, v150 - ; GCN-NEXT: v_cvt_f16_f32_e32 v150, v151 - ; GCN-NEXT: v_fma_f32 v157, s4, v38, -v134 - ; GCN-NEXT: v_cvt_f16_f32_e32 v38, v153 - ; GCN-NEXT: v_exp_f32_e32 v159, v33 - ; GCN-NEXT: v_perm_b32 v33, v131, v129, s5 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[144:145], v[148:149], v[112:127] - ; GCN-NEXT: v_pack_b32_f16 v129, v150, v38 - ; GCN-NEXT: v_mul_f32_e32 v38, 0x3fb8aa3b, v152 - ; GCN-NEXT: v_exp_f32_e32 v152, v38 + ; GCN-NEXT: ds_write_b64 v199, v[188:189] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b64 v136, v[60:61] + ; GCN-NEXT: ds_write_b64 v200, v[190:191] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b64 v137, v[32:33] - ; GCN-NEXT: ; implicit-def: $vgpr33 - ; GCN-NEXT: ; implicit-def: $vgpr38 + ; GCN-NEXT: ds_write_b64 v201, v[192:193] ; GCN-NEXT: buffer_wbl2 sc0 sc1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: ds_write_b64 v138, v[140:141] - ; GCN-NEXT: v_add_u32_e32 v38, v132, v38 - ; GCN-NEXT: v_add_u32_e32 v33, v132, v33 + ; GCN-NEXT: ds_write_b64 v202, v[194:195] + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[146:147], v[126:127], v[32:47] + ; GCN-NEXT: v_exp_f32_e32 v101, v125 + ; GCN-NEXT: v_pack_b32_f16 v146, v130, v131 ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_load_dwordx2 v[130:131], v38, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[130:131], v210, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: buffer_load_dwordx2 v[140:141], v33, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v143 + ; GCN-NEXT: v_cvt_f16_f32_e32 v147, v98 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[134:135], v[126:127], v[16:31] + ; GCN-NEXT: v_fma_f32 v134, s4, v102, -v128 + ; GCN-NEXT: v_mul_f32_e32 v156, 0x3fb8aa3b, v134 + ; GCN-NEXT: buffer_load_dwordx2 v[134:135], v207, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ; implicit-def: $vgpr36 - ; 
GCN-NEXT: v_add_u32_e32 v33, v132, v36 - ; GCN-NEXT: ; implicit-def: $vgpr37 - ; GCN-NEXT: buffer_load_dwordx2 v[144:145], v33, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: v_exp_f32_e32 v102, v142 + ; GCN-NEXT: buffer_load_dwordx2 v[142:143], v208, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_add_u32_e32 v33, v132, v37 - ; GCN-NEXT: buffer_load_dwordx2 v[148:149], v33, s[0:3], 0 offen sc0 sc1 + ; GCN-NEXT: buffer_load_dwordx2 v[144:145], v209, s[0:3], 0 offen sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_cvt_f16_f32_e32 v156, v162 - ; GCN-NEXT: v_mul_f32_e32 v32, 0x3fb8aa3b, v155 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: s_waitcnt vmcnt(8) ; GCN-NEXT: ;;#ASMEND - ; GCN-NEXT: v_cvt_f16_f32_e32 v33, v165 - ; GCN-NEXT: v_pack_b32_f16 v128, v154, v156 - ; GCN-NEXT: v_fma_f32 v150, s4, v39, -v134 - ; GCN-NEXT: ds_read_b128 v[36:39], v139 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[62:63], v[128:129], v[64:79] - ; GCN-NEXT: v_exp_f32_e32 v154, v32 - ; GCN-NEXT: v_mul_f32_e32 v32, 0x3fb8aa3b, v158 - ; GCN-NEXT: ds_read_b128 v[60:63], v139 offset:576 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_fma_f32 v156, s4, v42, -v134 - ; GCN-NEXT: v_perm_b32 v20, v140, v130, s5 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[142:143], v[128:129], v[80:95] - ; GCN-NEXT: v_exp_f32_e32 v155, v32 - ; GCN-NEXT: v_mul_f32_e32 v32, 0x3fb8aa3b, v157 - ; GCN-NEXT: v_cvt_f16_f32_e32 v142, v161 - ; GCN-NEXT: v_fma_f32 v143, s4, v41, -v134 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[34:35], v[128:129], v[96:111] - ; GCN-NEXT: v_cvt_f16_f32_e32 v34, v159 - ; GCN-NEXT: v_exp_f32_e32 v157, v32 - ; GCN-NEXT: v_cvt_f16_f32_e32 v32, v152 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[146:147], v[128:129], v[112:127] - ; GCN-NEXT: v_pack_b32_f16 v129, v34, v32 - ; GCN-NEXT: v_mul_f32_e32 v32, 0x3fb8aa3b, v150 - ; GCN-NEXT: v_pack_b32_f16 v128, v33, v142 - ; GCN-NEXT: v_exp_f32_e32 v146, v32 - ; GCN-NEXT: ds_read_b128 v[32:35], v139 offset:1152 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_fma_f32 v142, s4, v43, -v134 - ; GCN-NEXT: v_fma_f32 v150, s4, v46, -v134 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[36:37], v[128:129], v[64:79] - ; GCN-NEXT: v_mul_f32_e32 v36, 0x3fb8aa3b, v40 - ; GCN-NEXT: ds_read_b128 v[40:43], v139 offset:1728 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_exp_f32_e32 v147, v36 - ; GCN-NEXT: v_mul_f32_e32 v36, 0x3fb8aa3b, v143 - ; GCN-NEXT: v_cvt_f16_f32_e32 v37, v154 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[60:61], v[128:129], v[80:95] - ; GCN-NEXT: v_exp_f32_e32 v143, v36 - ; GCN-NEXT: v_cvt_f16_f32_e32 v60, v155 - ; GCN-NEXT: v_mul_f32_e32 v36, 0x3fb8aa3b, v142 - ; GCN-NEXT: v_fma_f32 v61, s4, v45, -v134 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[32:33], v[128:129], v[96:111] - ; GCN-NEXT: v_mul_f32_e32 v32, 0x3fb8aa3b, v156 - ; GCN-NEXT: v_cvt_f16_f32_e32 v33, v157 - ; GCN-NEXT: v_exp_f32_e32 v156, v32 - ; GCN-NEXT: v_cvt_f16_f32_e32 v32, v146 - ; GCN-NEXT: v_pack_b32_f16 v33, v33, v32 - ; GCN-NEXT: v_pack_b32_f16 v32, v37, v60 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[40:41], v[128:129], v[112:127] - ; GCN-NEXT: v_exp_f32_e32 v129, v36 - ; GCN-NEXT: v_mul_f32_e32 v40, 0x3fb8aa3b, v44 - ; GCN-NEXT: v_cvt_f16_f32_e32 v60, v147 - ; GCN-NEXT: v_fma_f32 v128, s4, v47, -v134 - ; GCN-NEXT: 
v_mfma_f32_32x32x8_f16 v[64:79], v[38:39], v[32:33], v[64:79] - ; GCN-NEXT: ds_read_b128 v[36:39], v57 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_exp_f32_e32 v142, v40 - ; GCN-NEXT: v_mul_f32_e32 v40, 0x3fb8aa3b, v61 - ; GCN-NEXT: v_cvt_f16_f32_e32 v61, v143 - ; GCN-NEXT: ds_read_b128 v[44:47], v57 offset:576 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[62:63], v[32:33], v[80:95] - ; GCN-NEXT: v_fma_f32 v62, s4, v17, -v134 - ; GCN-NEXT: v_mul_f32_e32 v17, 0x3fb8aa3b, v150 - ; GCN-NEXT: v_exp_f32_e32 v63, v40 - ; GCN-NEXT: v_pack_b32_f16 v40, v60, v61 - ; GCN-NEXT: v_fma_f32 v150, s4, v18, -v134 - ; GCN-NEXT: v_fma_f32 v60, s4, v19, -v134 - ; GCN-NEXT: v_cvt_f16_f32_e32 v61, v142 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[34:35], v[32:33], v[96:111] - ; GCN-NEXT: v_cvt_f16_f32_e32 v34, v156 - ; GCN-NEXT: v_exp_f32_e32 v158, v17 - ; GCN-NEXT: v_cvt_f16_f32_e32 v17, v129 - ; GCN-NEXT: v_pack_b32_f16 v41, v34, v17 - ; GCN-NEXT: v_mul_f32_e32 v17, 0x3fb8aa3b, v128 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[42:43], v[32:33], v[112:127] - ; GCN-NEXT: v_exp_f32_e32 v128, v17 - ; GCN-NEXT: v_perm_b32 v42, v141, v131, s8 - ; GCN-NEXT: v_perm_b32 v43, v149, v145, s8 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[36:37], v[40:41], v[64:79] - ; GCN-NEXT: v_mul_f32_e32 v36, 0x3fb8aa3b, v16 - ; GCN-NEXT: ds_read_b128 v[16:19], v57 offset:1152 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: ds_read_b128 v[32:35], v57 offset:1728 - ; GCN-NEXT: s_waitcnt lgkmcnt(0) - ; GCN-NEXT: buffer_inv sc0 sc1 - ; GCN-NEXT: v_mul_f32_e32 v37, 0x3fb8aa3b, v62 - ; GCN-NEXT: v_exp_f32_e32 v167, v36 - ; GCN-NEXT: v_perm_b32 v36, v140, v130, s8 - ; GCN-NEXT: v_fma_f32 v62, s4, v21, -v134 - ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[44:45], v[40:41], v[80:95] - ; GCN-NEXT: v_exp_f32_e32 v130, v37 - ; GCN-NEXT: v_cvt_f16_f32_e32 v45, v158 - ; GCN-NEXT: v_perm_b32 v21, v148, v144, s5 - ; GCN-NEXT: v_perm_b32 v37, v148, v144, s8 - ; GCN-NEXT: v_cvt_f16_f32_e32 v44, v63 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[138:139], v[126:127], v[48:63] + ; GCN-NEXT: v_cvt_f16_f32_e32 v126, v99 + ; GCN-NEXT: v_fma_f32 v127, s4, v103, -v128 + ; GCN-NEXT: v_exp_f32_e32 v103, v150 + ; GCN-NEXT: v_fma_f32 v139, s4, v105, -v128 + ; GCN-NEXT: v_pack_b32_f16 v147, v147, v126 + ; GCN-NEXT: v_mul_f32_e32 v138, 0x3fb8aa3b, v127 + ; GCN-NEXT: v_perm_b32 v152, v135, v131, s5 + ; GCN-NEXT: v_perm_b32 v154, v135, v131, s7 + ; GCN-NEXT: v_fma_f32 v135, s4, v104, -v128 + ; GCN-NEXT: v_perm_b32 v126, v134, v130, s5 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[132:133], v[146:147], v[0:15] + ; GCN-NEXT: v_perm_b32 v150, v134, v130, s7 + ; GCN-NEXT: v_cvt_f16_f32_e32 v134, v100 + ; GCN-NEXT: v_exp_f32_e32 v104, v129 + ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v135 + ; GCN-NEXT: v_cvt_f16_f32_e32 v135, v101 + ; GCN-NEXT: ds_read_b128 v[130:133], v198 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_perm_b32 v127, v144, v142, s5 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[148:149], v[146:147], v[32:47] + ; GCN-NEXT: v_pack_b32_f16 v148, v134, v135 + ; GCN-NEXT: v_fma_f32 v135, s4, v106, -v128 + ; GCN-NEXT: v_exp_f32_e32 v105, v125 + ; GCN-NEXT: v_cvt_f16_f32_e32 v134, v102 + ; GCN-NEXT: v_perm_b32 v151, v144, v142, s7 + ; GCN-NEXT: v_perm_b32 v153, v145, v143, s5 + ; GCN-NEXT: v_perm_b32 v155, v145, v143, s7 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 
v[16:31], v[136:137], v[146:147], v[16:31] + ; GCN-NEXT: v_exp_f32_e32 v106, v156 + ; GCN-NEXT: v_mul_f32_e32 v156, 0x3fb8aa3b, v135 + ; GCN-NEXT: v_cvt_f16_f32_e32 v135, v103 + ; GCN-NEXT: v_fma_f32 v136, s4, v107, -v128 + ; GCN-NEXT: ds_read_b128 v[142:145], v198 offset:576 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v139 + ; GCN-NEXT: v_pack_b32_f16 v149, v134, v135 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[140:141], v[146:147], v[48:63] + ; GCN-NEXT: v_mul_f32_e32 v146, 0x3fb8aa3b, v136 + ; GCN-NEXT: ds_read_b128 v[134:137], v198 offset:1152 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_exp_f32_e32 v107, v138 + ; GCN-NEXT: ds_read_b128 v[138:141], v198 offset:1728 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[130:131], v[148:149], v[0:15] + ; GCN-NEXT: v_fma_f32 v131, s4, v108, -v128 + ; GCN-NEXT: v_cvt_f16_f32_e32 v130, v104 + ; GCN-NEXT: v_exp_f32_e32 v108, v129 + ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v131 + ; GCN-NEXT: v_cvt_f16_f32_e32 v131, v105 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[142:143], v[148:149], v[32:47] + ; GCN-NEXT: v_fma_f32 v142, s4, v109, -v128 + ; GCN-NEXT: v_exp_f32_e32 v109, v125 + ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v142 + ; GCN-NEXT: v_pack_b32_f16 v142, v130, v131 + ; GCN-NEXT: v_fma_f32 v131, s4, v110, -v128 + ; GCN-NEXT: v_cvt_f16_f32_e32 v130, v106 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[134:135], v[148:149], v[16:31] + ; GCN-NEXT: v_mul_f32_e32 v134, 0x3fb8aa3b, v131 + ; GCN-NEXT: v_cvt_f16_f32_e32 v131, v107 + ; GCN-NEXT: v_exp_f32_e32 v110, v156 + ; GCN-NEXT: v_fma_f32 v135, s4, v111, -v128 + ; GCN-NEXT: v_mul_f32_e32 v135, 0x3fb8aa3b, v135 + ; GCN-NEXT: v_pack_b32_f16 v143, v130, v131 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[138:139], v[148:149], v[48:63] + ; GCN-NEXT: v_exp_f32_e32 v111, v146 + ; GCN-NEXT: v_fma_f32 v139, s4, v80, -v128 + ; GCN-NEXT: v_cvt_f16_f32_e32 v138, v108 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[132:133], v[142:143], v[0:15] + ; GCN-NEXT: v_exp_f32_e32 v80, v129 + ; GCN-NEXT: ds_read_b128 v[130:133], v197 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: ds_read_b128 v[146:149], v197 offset:576 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v139 + ; GCN-NEXT: v_cvt_f16_f32_e32 v139, v109 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[144:145], v[142:143], v[32:47] + ; GCN-NEXT: v_fma_f32 v144, s4, v81, -v128 + ; GCN-NEXT: v_exp_f32_e32 v81, v125 + ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v144 + ; GCN-NEXT: v_pack_b32_f16 v144, v138, v139 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[136:137], v[142:143], v[16:31] + ; GCN-NEXT: v_cvt_f16_f32_e32 v136, v110 + ; GCN-NEXT: v_fma_f32 v137, s4, v82, -v128 + ; GCN-NEXT: v_exp_f32_e32 v82, v134 + ; GCN-NEXT: v_cvt_f16_f32_e32 v134, v111 + ; GCN-NEXT: v_mul_f32_e32 v156, 0x3fb8aa3b, v137 + ; GCN-NEXT: v_fma_f32 v137, s4, v83, -v128 + ; GCN-NEXT: v_mul_f32_e32 v157, 0x3fb8aa3b, v137 + ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[140:141], v[142:143], v[48:63] + ; GCN-NEXT: v_exp_f32_e32 v83, v135 + ; GCN-NEXT: v_pack_b32_f16 v145, v136, v134 + ; GCN-NEXT: ds_read_b128 v[134:137], v197 offset:1152 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) + ; GCN-NEXT: buffer_inv sc0 sc1 + ; GCN-NEXT: ds_read_b128 v[138:141], v197 offset:1728 + ; GCN-NEXT: s_waitcnt lgkmcnt(0) 
+ ; GCN-NEXT: buffer_inv sc0 sc1
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: s_waitcnt vmcnt(8)
 ; GCN-NEXT: ;;#ASMEND
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
- ; GCN-NEXT: ds_write_b64 v135, v[20:21]
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[16:17], v[40:41], v[96:111]
- ; GCN-NEXT: v_perm_b32 v16, v141, v131, s5
- ; GCN-NEXT: v_fma_f32 v131, s4, v22, -v134
- ; GCN-NEXT: v_cvt_f16_f32_e32 v22, v128
- ; GCN-NEXT: v_mul_f32_e32 v17, 0x3fb8aa3b, v150
- ; GCN-NEXT: v_exp_f32_e32 v140, v17
- ; GCN-NEXT: v_perm_b32 v17, v149, v145, s5
+ ; GCN-NEXT: ds_write_b64 v199, v[126:127]
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_write_b64 v136, v[36:37]
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[32:33], v[40:41], v[112:127]
- ; GCN-NEXT: v_pack_b32_f16 v33, v45, v22
- ; GCN-NEXT: v_mul_f32_e32 v22, 0x3fb8aa3b, v60
- ; GCN-NEXT: v_exp_f32_e32 v144, v22
+ ; GCN-NEXT: ds_write_b64 v200, v[150:151]
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[130:131], v[144:145], v[0:15]
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_write_b64 v137, v[16:17]
- ; GCN-NEXT: ; implicit-def: $vgpr17
- ; GCN-NEXT: ; implicit-def: $vgpr22
+ ; GCN-NEXT: ds_write_b64 v201, v[152:153]
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_write_b64 v138, v[42:43]
- ; GCN-NEXT: v_add_u32_e32 v22, v132, v22
- ; GCN-NEXT: v_add_u32_e32 v17, v132, v17
- ; GCN-NEXT: ; implicit-def: $vgpr20
- ; GCN-NEXT: ; implicit-def: $vgpr21
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_load_dwordx2 v[40:41], v22, s[0:3], 0 offen sc0 sc1
+ ; GCN-NEXT: ds_write_b64 v202, v[154:155]
+ ; GCN-NEXT: v_fma_f32 v127, s4, v84, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v84, v129
+ ; GCN-NEXT: v_fma_f32 v130, s4, v85, -v128
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v126, v80
+ ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v127
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[146:147], v[144:145], v[32:47]
+ ; GCN-NEXT: v_exp_f32_e32 v85, v125
+ ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v130
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_load_dwordx2 v[130:131], v206, s[0:3], 0 offen sc0 sc1
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: buffer_load_dwordx2 v[42:43], v17, s[0:3], 0 offen sc0 sc1
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v127, v81
+ ; GCN-NEXT: v_pack_b32_f16 v126, v126, v127
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[134:135], v[144:145], v[16:31]
+ ; GCN-NEXT: v_fma_f32 v134, s4, v86, -v128
+ ; GCN-NEXT: v_mul_f32_e32 v158, 0x3fb8aa3b, v134
+ ; GCN-NEXT: buffer_load_dwordx2 v[134:135], v203, s[0:3], 0 offen sc0 sc1
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_add_u32_e32 v20, v132, v20
- ; GCN-NEXT: v_add_u32_e32 v21, v132, v21
- ; GCN-NEXT: v_pack_b32_f16 v32, v61, v44
- ; GCN-NEXT: buffer_load_dwordx2 v[44:45], v20, s[0:3], 0 offen sc0 sc1
+ ; GCN-NEXT: buffer_load_dwordx2 v[142:143], v204, s[0:3], 0 offen sc0 sc1
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: buffer_load_dwordx2 v[60:61], v21, s[0:3], 0 offen sc0 sc1
+ ; GCN-NEXT: buffer_load_dwordx2 v[146:147], v205, s[0:3], 0 offen sc0 sc1
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_mul_f32_e32 v16, 0x3fb8aa3b, v166
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[38:39], v[32:33], v[64:79]
- ; GCN-NEXT: v_exp_f32_e32 v132, v16
- ; GCN-NEXT: v_mul_f32_e32 v16, 0x3fb8aa3b, v62
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v127, v82
+ ; GCN-NEXT: v_exp_f32_e32 v86, v156
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[138:139], v[144:145], v[48:63]
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v138, v83
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: s_waitcnt vmcnt(8)
 ; GCN-NEXT: ;;#ASMEND
- ; GCN-NEXT: v_cvt_f16_f32_e32 v17, v167
- ; GCN-NEXT: v_fma_f32 v141, s4, v23, -v134
- ; GCN-NEXT: ds_read_b128 v[20:23], v139
+ ; GCN-NEXT: v_fma_f32 v139, s4, v87, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v87, v157
+ ; GCN-NEXT: v_pack_b32_f16 v127, v127, v138
+ ; GCN-NEXT: v_fma_f32 v138, s4, v89, -v128
+ ; GCN-NEXT: v_mul_f32_e32 v139, 0x3fb8aa3b, v139
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[132:133], v[126:127], v[0:15]
+ ; GCN-NEXT: ; implicit-def: $sgpr0
+ ; GCN-NEXT: v_perm_b32 v154, v135, v131, s5
+ ; GCN-NEXT: v_perm_b32 v156, v135, v131, s7
+ ; GCN-NEXT: v_fma_f32 v135, s4, v88, -v128
+ ; GCN-NEXT: v_perm_b32 v150, v134, v130, s5
+ ; GCN-NEXT: v_perm_b32 v152, v134, v130, s7
+ ; GCN-NEXT: ds_read_b128 v[130:133], v198
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v134, v84
+ ; GCN-NEXT: v_exp_f32_e32 v88, v129
+ ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v135
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v135, v85
+ ; GCN-NEXT: v_perm_b32 v151, v146, v142, s5
+ ; GCN-NEXT: v_perm_b32 v153, v146, v142, s7
+ ; GCN-NEXT: v_perm_b32 v155, v147, v143, s5
+ ; GCN-NEXT: v_perm_b32 v157, v147, v143, s7
+ ; GCN-NEXT: ds_read_b128 v[142:145], v198 offset:576
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[148:149], v[126:127], v[32:47]
+ ; GCN-NEXT: v_exp_f32_e32 v89, v125
+ ; GCN-NEXT: v_pack_b32_f16 v146, v134, v135
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v134, v86
+ ; GCN-NEXT: v_fma_f32 v135, s4, v90, -v128
+ ; GCN-NEXT: v_mul_f32_e32 v125, 0x3fb8aa3b, v138
+ ; GCN-NEXT: v_mul_f32_e32 v148, 0x3fb8aa3b, v135
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[136:137], v[126:127], v[16:31]
+ ; GCN-NEXT: v_exp_f32_e32 v90, v158
+ ; GCN-NEXT: v_mul_f32_e32 v158, 0x3fb8aa3b, v64
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[140:141], v[126:127], v[48:63]
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v126, v87
+ ; GCN-NEXT: v_fma_f32 v127, s4, v91, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v91, v139
+ ; GCN-NEXT: v_mul_f32_e32 v127, 0x3fb8aa3b, v127
+ ; GCN-NEXT: v_pack_b32_f16 v147, v134, v126
+ ; GCN-NEXT: ds_read_b128 v[134:137], v198 offset:1152
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: ds_read_b128 v[138:141], v198 offset:1728
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[130:131], v[146:147], v[0:15]
+ ; GCN-NEXT: v_fma_f32 v130, s4, v92, -v128
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v126, v88
+ ; GCN-NEXT: v_exp_f32_e32 v92, v129
+ ; GCN-NEXT: v_mul_f32_e32 v129, 0x3fb8aa3b, v130
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v130, v89
+ ; GCN-NEXT: v_fma_f32 v131, s4, v93, -v128
+ ; GCN-NEXT: v_pack_b32_f16 v130, v126, v130
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[142:143], v[146:147], v[32:47]
+ ; GCN-NEXT: v_exp_f32_e32 v93, v125
+ ; GCN-NEXT: v_fma_f32 v126, s4, v94, -v128
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v125, v90
+ ; GCN-NEXT: v_mul_f32_e32 v143, 0x3fb8aa3b, v126
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v126, v91
+ ; GCN-NEXT: v_mul_f32_e32 v142, 0x3fb8aa3b, v131
+ ; GCN-NEXT: v_fma_f32 v131, s4, v95, -v128
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[134:135], v[146:147], v[16:31]
+ ; GCN-NEXT: v_exp_f32_e32 v94, v148
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v64, v93
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[138:139], v[146:147], v[48:63]
+ ; GCN-NEXT: v_exp_f32_e32 v95, v127
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v127, v92
+ ; GCN-NEXT: v_mul_f32_e32 v138, 0x3fb8aa3b, v131
+ ; GCN-NEXT: v_pack_b32_f16 v131, v125, v126
+ ; GCN-NEXT: s_nop 1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[132:133], v[130:131], v[0:15]
+ ; GCN-NEXT: v_exp_f32_e32 v125, v129
+ ; GCN-NEXT: ds_read_b128 v[132:135], v197
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: ds_read_b128 v[36:39], v139 offset:576
+ ; GCN-NEXT: ds_read_b128 v[146:149], v197 offset:576
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[144:145], v[130:131], v[32:47]
+ ; GCN-NEXT: v_mul_f32_e32 v144, 0x3fb8aa3b, v65
+ ; GCN-NEXT: v_fma_f32 v65, s4, v66, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v126, v142
+ ; GCN-NEXT: v_pack_b32_f16 v142, v127, v64
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v64, v94
+ ; GCN-NEXT: v_mul_f32_e32 v145, 0x3fb8aa3b, v65
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v65, v95
+ ; GCN-NEXT: v_fma_f32 v66, s4, v67, -v128
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[136:137], v[130:131], v[16:31]
+ ; GCN-NEXT: v_exp_f32_e32 v127, v143
+ ; GCN-NEXT: v_pack_b32_f16 v143, v64, v65
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[140:141], v[130:131], v[48:63]
+ ; GCN-NEXT: v_exp_f32_e32 v129, v138
+ ; GCN-NEXT: v_mul_f32_e32 v141, 0x3fb8aa3b, v66
+ ; GCN-NEXT: ds_read_b128 v[64:67], v197 offset:1152
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: ds_read_b128 v[136:139], v197 offset:1728
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[46:47], v[32:33], v[80:95]
- ; GCN-NEXT: v_exp_f32_e32 v62, v16
- ; GCN-NEXT: v_mul_f32_e32 v16, 0x3fb8aa3b, v131
- ; GCN-NEXT: v_cvt_f16_f32_e32 v46, v130
- ; GCN-NEXT: v_fma_f32 v47, s4, v25, -v134
- ; GCN-NEXT: v_fma_f32 v131, s4, v26, -v134
- ; GCN-NEXT: v_fma_f32 v149, s4, v4, -v134
- ; GCN-NEXT: ; implicit-def: $sgpr0
- ; GCN-NEXT: v_perm_b32 v4, v42, v40, s5
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[18:19], v[32:33], v[96:111]
- ; GCN-NEXT: v_cvt_f16_f32_e32 v18, v140
- ; GCN-NEXT: v_exp_f32_e32 v145, v16
- ; GCN-NEXT: v_cvt_f16_f32_e32 v16, v144
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[34:35], v[32:33], v[112:127]
- ; GCN-NEXT: v_pack_b32_f16 v33, v18, v16
- ; GCN-NEXT: v_mul_f32_e32 v16, 0x3fb8aa3b, v141
- ; GCN-NEXT: v_pack_b32_f16 v32, v17, v46
- ; GCN-NEXT: v_exp_f32_e32 v35, v16
- ; GCN-NEXT: ds_read_b128 v[16:19], v139 offset:1152
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_fma_f32 v34, s4, v27, -v134
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[20:21], v[32:33], v[64:79]
- ; GCN-NEXT: v_mul_f32_e32 v20, 0x3fb8aa3b, v24
- ; GCN-NEXT: ds_read_b128 v[24:27], v139 offset:1728
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_exp_f32_e32 v46, v20
- ; GCN-NEXT: v_mul_f32_e32 v20, 0x3fb8aa3b, v47
- ; GCN-NEXT: v_cvt_f16_f32_e32 v21, v132
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[36:37], v[32:33], v[80:95]
- ; GCN-NEXT: v_exp_f32_e32 v47, v20
- ; GCN-NEXT: v_cvt_f16_f32_e32 v36, v62
- ; GCN-NEXT: v_mul_f32_e32 v20, 0x3fb8aa3b, v34
- ; GCN-NEXT: v_fma_f32 v37, s4, v29, -v134
- ; GCN-NEXT: v_cvt_f16_f32_e32 v34, v46
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[16:17], v[32:33], v[96:111]
- ; GCN-NEXT: v_mul_f32_e32 v16, 0x3fb8aa3b, v131
- ; GCN-NEXT: v_cvt_f16_f32_e32 v17, v145
- ; GCN-NEXT: v_exp_f32_e32 v141, v16
- ; GCN-NEXT: v_cvt_f16_f32_e32 v16, v35
- ; GCN-NEXT: v_fma_f32 v131, s4, v30, -v134
- ; GCN-NEXT: v_pack_b32_f16 v17, v17, v16
- ; GCN-NEXT: v_pack_b32_f16 v16, v21, v36
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[24:25], v[32:33], v[112:127]
- ; GCN-NEXT: v_exp_f32_e32 v33, v20
- ; GCN-NEXT: v_mul_f32_e32 v24, 0x3fb8aa3b, v28
- ; GCN-NEXT: v_fma_f32 v32, s4, v31, -v134
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[22:23], v[16:17], v[64:79]
- ; GCN-NEXT: ds_read_b128 v[20:23], v57
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_exp_f32_e32 v36, v24
- ; GCN-NEXT: v_mul_f32_e32 v24, 0x3fb8aa3b, v37
- ; GCN-NEXT: v_cvt_f16_f32_e32 v37, v47
- ; GCN-NEXT: ds_read_b128 v[28:31], v57 offset:576
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[38:39], v[16:17], v[80:95]
- ; GCN-NEXT: v_fma_f32 v38, s4, v1, -v134
- ; GCN-NEXT: v_mul_f32_e32 v1, 0x3fb8aa3b, v131
- ; GCN-NEXT: v_exp_f32_e32 v39, v24
- ; GCN-NEXT: v_pack_b32_f16 v24, v34, v37
- ; GCN-NEXT: v_fma_f32 v131, s4, v2, -v134
- ; GCN-NEXT: v_cvt_f16_f32_e32 v37, v36
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[18:19], v[16:17], v[96:111]
- ; GCN-NEXT: v_cvt_f16_f32_e32 v18, v141
- ; GCN-NEXT: v_exp_f32_e32 v148, v1
- ; GCN-NEXT: v_cvt_f16_f32_e32 v1, v33
- ; GCN-NEXT: v_pack_b32_f16 v25, v18, v1
- ; GCN-NEXT: v_mul_f32_e32 v1, 0x3fb8aa3b, v32
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[26:27], v[16:17], v[112:127]
- ; GCN-NEXT: v_fma_f32 v32, s4, v3, -v134
- ; GCN-NEXT: v_exp_f32_e32 v34, v1
- ; GCN-NEXT: v_perm_b32 v26, v43, v41, s8
- ; GCN-NEXT: v_perm_b32 v27, v61, v45, s8
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[20:21], v[24:25], v[64:79]
- ; GCN-NEXT: v_mul_f32_e32 v20, 0x3fb8aa3b, v0
- ; GCN-NEXT: ds_read_b128 v[0:3], v57 offset:1152
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: ds_read_b128 v[16:19], v57 offset:1728
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_mul_f32_e32 v21, 0x3fb8aa3b, v38
- ; GCN-NEXT: v_exp_f32_e32 v150, v20
- ; GCN-NEXT: v_perm_b32 v20, v42, v40, s8
- ; GCN-NEXT: v_cvt_f16_f32_e32 v40, v148
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[28:29], v[24:25], v[80:95]
- ; GCN-NEXT: v_exp_f32_e32 v38, v21
- ; GCN-NEXT: v_cvt_f16_f32_e32 v28, v39
- ; GCN-NEXT: v_fma_f32 v29, s4, v5, -v134
- ; GCN-NEXT: v_perm_b32 v5, v60, v44, s5
- ; GCN-NEXT: v_perm_b32 v21, v60, v44, s8
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: s_waitcnt vmcnt(8)
 ; GCN-NEXT: ;;#ASMEND
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
- ; GCN-NEXT: ds_write_b64 v135, v[4:5]
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[0:1], v[24:25], v[96:111]
- ; GCN-NEXT: v_perm_b32 v0, v43, v41, s5
- ; GCN-NEXT: v_fma_f32 v41, s4, v6, -v134
- ; GCN-NEXT: v_cvt_f16_f32_e32 v6, v34
- ; GCN-NEXT: v_mul_f32_e32 v1, 0x3fb8aa3b, v131
- ; GCN-NEXT: v_exp_f32_e32 v42, v1
- ; GCN-NEXT: v_perm_b32 v1, v61, v45, s5
+ ; GCN-NEXT: ds_write_b64 v199, v[150:151]
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_write_b64 v136, v[20:21]
+ ; GCN-NEXT: ds_write_b64 v200, v[152:153]
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[132:133], v[142:143], v[0:15]
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v132, v125
+ ; GCN-NEXT: v_exp_f32_e32 v130, v158
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_write_b64 v137, v[0:1]
+ ; GCN-NEXT: ds_write_b64 v201, v[154:155]
 ; GCN-NEXT: buffer_wbl2 sc0 sc1
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_write_b64 v138, v[26:27]
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[16:17], v[24:25], v[112:127]
- ; GCN-NEXT: v_pack_b32_f16 v17, v40, v6
- ; GCN-NEXT: v_mul_f32_e32 v6, 0x3fb8aa3b, v32
+ ; GCN-NEXT: ds_write_b64 v202, v[156:157]
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: s_waitcnt vmcnt(8)
 ; GCN-NEXT: ;;#ASMEND
- ; GCN-NEXT: v_pack_b32_f16 v16, v37, v28
- ; GCN-NEXT: v_fma_f32 v24, s4, v7, -v134
- ; GCN-NEXT: v_exp_f32_e32 v25, v6
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_read_b128 v[4:7], v139
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[22:23], v[16:17], v[64:79]
- ; GCN-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v149
- ; GCN-NEXT: v_exp_f32_e32 v26, v0
- ; GCN-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v29
- ; GCN-NEXT: v_cvt_f16_f32_e32 v1, v150
- ; GCN-NEXT: v_cvt_f16_f32_e32 v27, v38
- ; GCN-NEXT: ds_read_b128 v[20:23], v139 offset:576
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_fma_f32 v28, s4, v9, -v134
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[30:31], v[16:17], v[80:95]
- ; GCN-NEXT: v_exp_f32_e32 v29, v0
- ; GCN-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v41
- ; GCN-NEXT: v_fma_f32 v30, s4, v10, -v134
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[2:3], v[16:17], v[96:111]
- ; GCN-NEXT: v_cvt_f16_f32_e32 v2, v42
- ; GCN-NEXT: v_exp_f32_e32 v31, v0
- ; GCN-NEXT: v_cvt_f16_f32_e32 v0, v25
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[18:19], v[16:17], v[112:127]
- ; GCN-NEXT: v_pack_b32_f16 v17, v2, v0
- ; GCN-NEXT: v_pack_b32_f16 v16, v1, v27
- ; GCN-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v24
- ; GCN-NEXT: v_fma_f32 v18, s4, v11, -v134
- ; GCN-NEXT: v_exp_f32_e32 v19, v0
- ; GCN-NEXT: ds_read_b128 v[0:3], v139 offset:1152
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[4:5], v[16:17], v[64:79]
- ; GCN-NEXT: v_mul_f32_e32 v4, 0x3fb8aa3b, v8
- ; GCN-NEXT: ds_read_b128 v[8:11], v139 offset:1728
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_exp_f32_e32 v24, v4
- ; GCN-NEXT: v_mul_f32_e32 v4, 0x3fb8aa3b, v28
- ; GCN-NEXT: v_cvt_f16_f32_e32 v5, v26
- ; GCN-NEXT: v_exp_f32_e32 v27, v4
- ; GCN-NEXT: v_mul_f32_e32 v4, 0x3fb8aa3b, v18
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[20:21], v[16:17], v[80:95]
- ; GCN-NEXT: v_cvt_f16_f32_e32 v20, v29
- ; GCN-NEXT: v_fma_f32 v21, s4, v13, -v134
- ; GCN-NEXT: v_fma_f32 v28, s4, v14, -v134
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[0:1], v[16:17], v[96:111]
- ; GCN-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v30
- ; GCN-NEXT: v_cvt_f16_f32_e32 v1, v31
- ; GCN-NEXT: v_exp_f32_e32 v30, v0
- ; GCN-NEXT: v_cvt_f16_f32_e32 v0, v19
- ; GCN-NEXT: v_pack_b32_f16 v1, v1, v0
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[8:9], v[16:17], v[112:127]
- ; GCN-NEXT: v_exp_f32_e32 v16, v4
- ; GCN-NEXT: v_pack_b32_f16 v0, v5, v20
- ; GCN-NEXT: v_mul_f32_e32 v9, 0x3fb8aa3b, v12
- ; GCN-NEXT: v_exp_f32_e32 v18, v9
- ; GCN-NEXT: v_mul_f32_e32 v9, 0x3fb8aa3b, v21
- ; GCN-NEXT: v_exp_f32_e32 v21, v9
- ; GCN-NEXT: v_fma_f32 v8, s4, v15, -v134
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[6:7], v[0:1], v[64:79]
- ; GCN-NEXT: ds_read_b128 v[4:7], v57
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: ds_read_b128 v[12:15], v57 offset:576
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
- ; GCN-NEXT: v_cvt_f16_f32_e32 v17, v24
- ; GCN-NEXT: v_cvt_f16_f32_e32 v20, v27
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[22:23], v[0:1], v[80:95]
- ; GCN-NEXT: v_cvt_f16_f32_e32 v22, v21
- ; GCN-NEXT: v_cvt_f16_f32_e32 v23, v18
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[2:3], v[0:1], v[96:111]
- ; GCN-NEXT: v_cvt_f16_f32_e32 v3, v30
- ; GCN-NEXT: v_mul_f32_e32 v2, 0x3fb8aa3b, v28
- ; GCN-NEXT: v_exp_f32_e32 v2, v2
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[112:127], v[10:11], v[0:1], v[112:127]
- ; GCN-NEXT: v_cvt_f16_f32_e32 v0, v16
- ; GCN-NEXT: v_mul_f32_e32 v1, 0x3fb8aa3b, v8
- ; GCN-NEXT: v_exp_f32_e32 v10, v1
- ; GCN-NEXT: v_pack_b32_f16 v8, v17, v20
- ; GCN-NEXT: v_pack_b32_f16 v9, v3, v0
- ; GCN-NEXT: v_add_f32_e32 v3, 0, v49
- ; GCN-NEXT: v_add_f32_e32 v3, v50, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v51, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v52, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v53, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v54, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v55, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v56, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v58, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v163, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v164, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v59, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v160, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v162, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v151, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v153, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v165, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v161, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v159, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v152, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v154, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v155, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v157, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v146, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v147, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v143, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v156, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v129, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v142, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v63, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v158, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v128, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v167, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v130, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v140, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v144, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v132, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v62, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v145, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v35, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v46, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v47, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v141, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v33, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v36, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v39, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v148, v3
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[12:13], v[8:9], v[80:95]
- ; GCN-NEXT: v_add_f32_e32 v3, v34, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v150, v3
- ; GCN-NEXT: v_cvt_f16_f32_e32 v1, v10
- ; GCN-NEXT: v_cvt_f16_f32_e32 v11, v2
- ; GCN-NEXT: v_add_f32_e32 v3, v38, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v42, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v25, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v26, v3
- ; GCN-NEXT: v_pack_b32_f16 v1, v11, v1
- ; GCN-NEXT: v_pack_b32_f16 v0, v23, v22
- ; GCN-NEXT: v_add_f32_e32 v3, v29, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v31, v3
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[80:95], v[14:15], v[0:1], v[80:95]
- ; GCN-NEXT: v_add_f32_e32 v3, v19, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v24, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v27, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v30, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v16, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v18, v3
- ; GCN-NEXT: v_add_f32_e32 v3, v21, v3
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[4:5], v[8:9], v[64:79]
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[64:79], v[6:7], v[0:1], v[64:79]
- ; GCN-NEXT: v_add_f32_e32 v0, v2, v3
- ; GCN-NEXT: v_add_f32_e32 v4, v10, v0
- ; GCN-NEXT: ds_bpermute_b32 v5, v133, v4
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: ds_read_b128 v[0:3], v57 offset:1152
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[146:147], v[142:143], v[32:47]
+ ; GCN-NEXT: v_mul_f32_e32 v146, 0x3fb8aa3b, v68
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v68, v126
+ ; GCN-NEXT: v_exp_f32_e32 v131, v144
+ ; GCN-NEXT: v_mul_f32_e32 v144, 0x3fb8aa3b, v69
+ ; GCN-NEXT: v_fma_f32 v69, s4, v71, -v128
+ ; GCN-NEXT: v_pack_b32_f16 v140, v132, v68
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v68, v129
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[64:65], v[142:143], v[16:31]
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v64, v127
+ ; GCN-NEXT: v_exp_f32_e32 v132, v145
+ ; GCN-NEXT: v_fma_f32 v65, s4, v70, -v128
+ ; GCN-NEXT: v_mul_f32_e32 v65, 0x3fb8aa3b, v65
+ ; GCN-NEXT: v_fma_f32 v145, s4, v73, -v128
+ ; GCN-NEXT: v_mul_f32_e32 v147, 0x3fb8aa3b, v145
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[136:137], v[142:143], v[48:63]
+ ; GCN-NEXT: v_exp_f32_e32 v133, v141
+ ; GCN-NEXT: v_mul_f32_e32 v142, 0x3fb8aa3b, v69
+ ; GCN-NEXT: v_pack_b32_f16 v141, v64, v68
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: ds_read_b128 v[68:71], v198
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_fma_f32 v143, s4, v72, -v128
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v64, v130
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[134:135], v[140:141], v[0:15]
+ ; GCN-NEXT: v_exp_f32_e32 v72, v146
+ ; GCN-NEXT: v_mul_f32_e32 v146, 0x3fb8aa3b, v143
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v143, v131
+ ; GCN-NEXT: ds_read_b128 v[134:137], v198 offset:576
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_pack_b32_f16 v64, v64, v143
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[148:149], v[140:141], v[32:47]
+ ; GCN-NEXT: v_exp_f32_e32 v73, v144
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[66:67], v[140:141], v[16:31]
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v66, v132
+ ; GCN-NEXT: v_fma_f32 v67, s4, v74, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v74, v65
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v65, v133
+ ; GCN-NEXT: v_mul_f32_e32 v67, 0x3fb8aa3b, v67
+ ; GCN-NEXT: v_pack_b32_f16 v65, v66, v65
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[138:139], v[140:141], v[48:63]
+ ; GCN-NEXT: v_fma_f32 v138, s4, v75, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v75, v142
+ ; GCN-NEXT: v_mul_f32_e32 v148, 0x3fb8aa3b, v138
+ ; GCN-NEXT: ds_read_b128 v[138:141], v198 offset:1152
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: ds_read_b128 v[142:145], v198 offset:1728
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v66, v72
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[68:69], v[64:65], v[0:15]
+ ; GCN-NEXT: v_fma_f32 v68, s4, v76, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v76, v146
+ ; GCN-NEXT: v_mul_f32_e32 v146, 0x3fb8aa3b, v68
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v68, v73
+ ; GCN-NEXT: v_fma_f32 v69, s4, v77, -v128
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[134:135], v[64:65], v[32:47]
+ ; GCN-NEXT: v_exp_f32_e32 v77, v147
+ ; GCN-NEXT: v_pack_b32_f16 v134, v66, v68
+ ; GCN-NEXT: v_fma_f32 v68, s4, v78, -v128
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v66, v74
+ ; GCN-NEXT: v_mul_f32_e32 v147, 0x3fb8aa3b, v69
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[138:139], v[64:65], v[16:31]
+ ; GCN-NEXT: v_exp_f32_e32 v78, v67
+ ; GCN-NEXT: v_mul_f32_e32 v138, 0x3fb8aa3b, v68
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v139, v76
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[142:143], v[64:65], v[48:63]
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v64, v75
+ ; GCN-NEXT: v_fma_f32 v65, s4, v79, -v128
+ ; GCN-NEXT: v_exp_f32_e32 v79, v148
+ ; GCN-NEXT: v_mul_f32_e32 v128, 0x3fb8aa3b, v65
+ ; GCN-NEXT: v_pack_b32_f16 v135, v66, v64
+ ; GCN-NEXT: s_nop 1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[70:71], v[134:135], v[0:15]
+ ; GCN-NEXT: v_exp_f32_e32 v142, v146
+ ; GCN-NEXT: ds_read_b128 v[68:71], v197
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: ds_read_b128 v[64:67], v197 offset:576
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[136:137], v[134:135], v[32:47]
+ ; GCN-NEXT: v_exp_f32_e32 v137, v147
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v136, v77
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[140:141], v[134:135], v[16:31]
+ ; GCN-NEXT: v_exp_f32_e32 v138, v138
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v140, v78
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[48:63], v[144:145], v[134:135], v[48:63]
+ ; GCN-NEXT: s_nop 10
+ ; GCN-NEXT: v_exp_f32_e32 v52, v128
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v50, v137
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v51, v142
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v54, v138
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v53, v52
+ ; GCN-NEXT: v_cvt_f16_f32_e32 v49, v79
+ ; GCN-NEXT: v_pack_b32_f16 v50, v51, v50
+ ; GCN-NEXT: v_pack_b32_f16 v48, v139, v136
+ ; GCN-NEXT: v_pack_b32_f16 v51, v54, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, 0, v113
+ ; GCN-NEXT: v_add_f32_e32 v53, v114, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v115, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v116, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v117, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v118, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v119, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v120, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v121, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v122, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v123, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v124, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v96, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v97, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v98, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v99, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v100, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v101, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v102, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v103, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v104, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v105, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v106, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v107, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v108, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v109, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v110, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v111, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v80, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v81, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v82, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v83, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v84, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v85, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v86, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v87, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v88, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v89, v53
+ ; GCN-NEXT: v_pack_b32_f16 v49, v140, v49
+ ; GCN-NEXT: v_add_f32_e32 v53, v90, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v91, v53
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[68:69], v[48:49], v[0:15]
+ ; GCN-NEXT: v_add_f32_e32 v53, v92, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v93, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v94, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v95, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v125, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v126, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v127, v53
+ ; GCN-NEXT: v_add_f32_e32 v53, v129, v53
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[0:15], v[70:71], v[50:51], v[0:15]
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[64:65], v[48:49], v[32:47]
+ ; GCN-NEXT: s_nop 9
+ ; GCN-NEXT: v_add_f32_e32 v0, v130, v53
+ ; GCN-NEXT: v_add_f32_e32 v0, v131, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v132, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v133, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v72, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v73, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v74, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v75, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v76, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v77, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v78, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v79, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v142, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v137, v0
+ ; GCN-NEXT: v_add_f32_e32 v0, v138, v0
+ ; GCN-NEXT: v_add_f32_e32 v4, v52, v0
+ ; GCN-NEXT: ds_bpermute_b32 v5, v196, v4
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: ds_read_b128 v[0:3], v197 offset:1152
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: buffer_inv sc0 sc1
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[16:31], v[0:1], v[48:49], v[16:31]
 ; GCN-NEXT: v_add_f32_e32 v2, v4, v5
- ; GCN-NEXT: ds_bpermute_b32 v3, v133, v2
- ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[96:111], v[0:1], v[8:9], v[96:111]
- ; GCN-NEXT: s_waitcnt lgkmcnt(0)
- ; GCN-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[6:7]
+ ; GCN-NEXT: ds_bpermute_b32 v3, v196, v2
 ; GCN-NEXT: ; implicit-def: $vgpr4
- ; GCN-NEXT: v_fmac_f32_e32 v0, v4, v48
- ; GCN-NEXT: ds_read_b128 v[0:3], v57 offset:1728
+ ; GCN-NEXT: s_waitcnt lgkmcnt(0)
+ ; GCN-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[12:13]
+ ; GCN-NEXT: v_fmac_f32_e32 v0, v4, v112
+ ; GCN-NEXT: ds_read_b128 v[0:3], v197 offset:1728
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: buffer_inv sc0 sc1
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: s_waitcnt vmcnt(8)
 ; GCN-NEXT: ;;#ASMEND
+ ; GCN-NEXT: v_mfma_f32_32x32x8_f16 v[32:47], v[66:67], v[50:51], v[32:47]
 ; GCN-NEXT: s_endpgm
 attributes #0 = {"amdgpu-flat-work-group-size"="256,256"}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
index 7959cee..e174fc1 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.ll
@@ -156,62 +156,62 @@ define amdgpu_kernel void @test_iglp_opt_rev_mfma_gemm(ptr addrspace(3) noalias
 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
 ; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
 ; GCN-NEXT: v_mov_b32_e32 v2, 1.0
-; GCN-NEXT: v_mov_b32_e32 v3, 2.0
+; GCN-NEXT: v_mov_b32_e32 v1, 2.0
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_add_u32_e32 v1, s0, v0
-; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:112
-; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:96
-; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:80
-; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:64
-; GCN-NEXT: ds_read_b128 a[0:3], v1
-; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:16
-; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:32
-; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:48
+; GCN-NEXT: v_add_u32_e32 v3, s0, v0
+; GCN-NEXT: ds_read_b128 a[28:31], v3 offset:112
+; GCN-NEXT: ds_read_b128 a[24:27], v3 offset:96
+; GCN-NEXT: ds_read_b128 a[20:23], v3 offset:80
+; GCN-NEXT: ds_read_b128 a[16:19], v3 offset:64
+; GCN-NEXT: ds_read_b128 a[0:3], v3
+; GCN-NEXT: ds_read_b128 a[4:7], v3 offset:16
+; GCN-NEXT: ds_read_b128 a[8:11], v3 offset:32
+; GCN-NEXT: ds_read_b128 a[12:15], v3 offset:48
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v3, a[0:31]
-; GCN-NEXT: ds_read_b128 a[156:159], v1 offset:8304
-; GCN-NEXT: ds_read_b128 a[152:155], v1 offset:8288
-; GCN-NEXT: ds_read_b128 a[148:151], v1 offset:8272
-; GCN-NEXT: ds_read_b128 a[144:147], v1 offset:8256
-; GCN-NEXT: ds_read_b128 a[140:143], v1 offset:8240
-; GCN-NEXT: ds_read_b128 a[136:139], v1 offset:8224
-; GCN-NEXT: ds_read_b128 a[132:135], v1 offset:8208
-; GCN-NEXT: ds_read_b128 a[128:131], v1 offset:8192
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v1, a[0:31]
+; GCN-NEXT: ds_read_b128 a[156:159], v3 offset:8304
+; GCN-NEXT: ds_read_b128 a[152:155], v3 offset:8288
+; GCN-NEXT: ds_read_b128 a[148:151], v3 offset:8272
+; GCN-NEXT: ds_read_b128 a[144:147], v3 offset:8256
+; GCN-NEXT: ds_read_b128 a[140:143], v3 offset:8240
+; GCN-NEXT: ds_read_b128 a[136:139], v3 offset:8224
+; GCN-NEXT: ds_read_b128 a[132:135], v3 offset:8208
+; GCN-NEXT: ds_read_b128 a[128:131], v3 offset:8192
+; GCN-NEXT: v_add_u32_e32 v4, 0x6000, v3
 ; GCN-NEXT: v_add_u32_e32 v0, s1, v0
 ; GCN-NEXT: ; iglp_opt mask(0x00000001)
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v2, v3, a[128:159]
-; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:24688
-; GCN-NEXT: ds_read_b128 a[120:123], v1 offset:24672
-; GCN-NEXT: ds_read_b128 a[116:119], v1 offset:24656
-; GCN-NEXT: ds_read_b128 a[112:115], v1 offset:24640
-; GCN-NEXT: ds_read_b128 a[108:111], v1 offset:24624
-; GCN-NEXT: ds_read_b128 a[104:107], v1 offset:24608
-; GCN-NEXT: ds_read_b128 a[100:103], v1 offset:24592
-; GCN-NEXT: ds_read_b128 a[96:99], v1 offset:24576
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v2, v1, a[128:159]
+; GCN-NEXT: ds_read_b128 a[124:127], v3 offset:24688
+; GCN-NEXT: ds_read_b128 a[120:123], v3 offset:24672
+; GCN-NEXT: ds_read_b128 a[116:119], v3 offset:24656
+; GCN-NEXT: ds_read_b128 a[112:115], v3 offset:24640
+; GCN-NEXT: ds_read_b128 a[108:111], v3 offset:24624
+; GCN-NEXT: ds_read_b128 a[104:107], v3 offset:24608
+; GCN-NEXT: ds_read_b128 a[100:103], v3 offset:24592
+; GCN-NEXT: ds_read_b128 a[96:99], v3 offset:24576
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v2, v3, a[96:127]
-; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:49264
-; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:49248
-; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:49232
-; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:49216
-; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:49200
-; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:49184
-; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:49168
-; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:49152
-; GCN-NEXT: v_add_u32_e32 v1, 0x6000, v1
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v2, v1, a[96:127]
+; GCN-NEXT: ds_read_b128 a[92:95], v3 offset:49264
+; GCN-NEXT: ds_read_b128 a[88:91], v3 offset:49248
+; GCN-NEXT: ds_read_b128 a[84:87], v3 offset:49232
+; GCN-NEXT: ds_read_b128 a[80:83], v3 offset:49216
+; GCN-NEXT: ds_read_b128 a[76:79], v3 offset:49200
+; GCN-NEXT: ds_read_b128 a[72:75], v3 offset:49184
+; GCN-NEXT: ds_read_b128 a[68:71], v3 offset:49168
+; GCN-NEXT: ds_read_b128 a[64:67], v3 offset:49152
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v2, v3, a[64:95]
-; GCN-NEXT: ds_read_b128 a[60:63], v1 offset:57456
-; GCN-NEXT: ds_read_b128 a[56:59], v1 offset:57440
-; GCN-NEXT: ds_read_b128 a[52:55], v1 offset:57424
-; GCN-NEXT: ds_read_b128 a[48:51], v1 offset:57408
-; GCN-NEXT: ds_read_b128 a[32:35], v1 offset:57344
-; GCN-NEXT: ds_read_b128 a[36:39], v1 offset:57360
-; GCN-NEXT: ds_read_b128 a[40:43], v1 offset:57376
-; GCN-NEXT: ds_read_b128 a[44:47], v1 offset:57392
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v2, v1, a[64:95]
+; GCN-NEXT: ds_read_b128 a[60:63], v4 offset:57456
+; GCN-NEXT: ds_read_b128 a[56:59], v4 offset:57440
+; GCN-NEXT: ds_read_b128 a[52:55], v4 offset:57424
+; GCN-NEXT: ds_read_b128 a[48:51], v4 offset:57408
+; GCN-NEXT: ds_read_b128 a[32:35], v4 offset:57344
+; GCN-NEXT: ds_read_b128 a[36:39], v4 offset:57360
+; GCN-NEXT: ds_read_b128 a[40:43], v4 offset:57376
+; GCN-NEXT: ds_read_b128 a[44:47], v4 offset:57392
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v2, v3, a[32:63]
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v2, v1, a[32:63]
 ; GCN-NEXT: ds_write_b128 v0, a[28:31] offset:112
 ; GCN-NEXT: ds_write_b128 v0, a[24:27] offset:96
 ; GCN-NEXT: ds_write_b128 v0, a[20:23] offset:80
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
index aa099b6..b65a1a8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
@@ -623,62 +623,62 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad
 ; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 7, v0
 ; GCN-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
+; GCN-NEXT: v_mov_b32_e32 v2, 1.0
+; GCN-NEXT: v_mov_b32_e32 v1, 2.0
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_add_u32_e32 v1, s0, v0
-; GCN-NEXT: ds_read_b128 a[156:159], v1 offset:112
-; GCN-NEXT: ds_read_b128 a[152:155], v1 offset:96
-; GCN-NEXT: ds_read_b128 a[148:151], v1 offset:80
-; GCN-NEXT: ds_read_b128 a[144:147], v1 offset:64
-; GCN-NEXT: ds_read_b128 a[128:131], v1
-; GCN-NEXT: ds_read_b128 a[132:135], v1 offset:16
-; GCN-NEXT: ds_read_b128 a[136:139], v1 offset:32
-; GCN-NEXT: ds_read_b128 a[140:143], v1 offset:48
-; GCN-NEXT: ds_read_b128 a[28:31], v1 offset:8304
-; GCN-NEXT: ds_read_b128 a[24:27], v1 offset:8288
-; GCN-NEXT: ds_read_b128 a[20:23], v1 offset:8272
-; GCN-NEXT: ds_read_b128 a[16:19], v1 offset:8256
-; GCN-NEXT: ds_read_b128 a[12:15], v1 offset:8240
-; GCN-NEXT: ds_read_b128 a[8:11], v1 offset:8224
-; GCN-NEXT: ds_read_b128 a[4:7], v1 offset:8208
-; GCN-NEXT: ds_read_b128 a[0:3], v1 offset:8192
-; GCN-NEXT: v_add_u32_e32 v2, 0x6000, v1
-; GCN-NEXT: ds_read_b128 a[124:127], v1 offset:24688
-; GCN-NEXT: ds_read_b128 a[120:123], v1 offset:24672
-; GCN-NEXT: ds_read_b128 a[116:119], v1 offset:24656
-; GCN-NEXT: ds_read_b128 a[112:115], v1 offset:24640
-; GCN-NEXT: ds_read_b128 a[108:111], v1 offset:24624
-; GCN-NEXT: ds_read_b128 a[104:107], v1 offset:24608
-; GCN-NEXT: ds_read_b128 a[100:103], v1 offset:24592
-; GCN-NEXT: ds_read_b128 a[96:99], v1 offset:24576
-; GCN-NEXT: ds_read_b128 a[92:95], v1 offset:49264
-; GCN-NEXT: ds_read_b128 a[88:91], v1 offset:49248
-; GCN-NEXT: ds_read_b128 a[84:87], v1 offset:49232
-; GCN-NEXT: ds_read_b128 a[80:83], v1 offset:49216
-; GCN-NEXT: ds_read_b128 a[76:79], v1 offset:49200
-; GCN-NEXT: ds_read_b128 a[72:75], v1 offset:49184
-; GCN-NEXT: ds_read_b128 a[68:71], v1 offset:49168
-; GCN-NEXT: ds_read_b128 a[64:67], v1 offset:49152
-; GCN-NEXT: v_mov_b32_e32 v1, 1.0
-; GCN-NEXT: ds_read_b128 a[60:63], v2 offset:57456
-; GCN-NEXT: ds_read_b128 a[56:59], v2 offset:57440
-; GCN-NEXT: ds_read_b128 a[52:55], v2 offset:57424
-; GCN-NEXT: ds_read_b128 a[48:51], v2 offset:57408
-; GCN-NEXT: ds_read_b128 a[32:35], v2 offset:57344
-; GCN-NEXT: ds_read_b128 a[36:39], v2 offset:57360
-; GCN-NEXT: ds_read_b128 a[40:43], v2 offset:57376
-; GCN-NEXT: ds_read_b128 a[44:47], v2 offset:57392
-; GCN-NEXT: v_mov_b32_e32 v2, 2.0
+; GCN-NEXT: v_add_u32_e32 v3, s0, v0
+; GCN-NEXT: ds_read_b128 a[156:159], v3 offset:112
+; GCN-NEXT: ds_read_b128 a[152:155], v3 offset:96
+; GCN-NEXT: ds_read_b128 a[148:151], v3 offset:80
+; GCN-NEXT: ds_read_b128 a[144:147], v3 offset:64
+; GCN-NEXT: ds_read_b128 a[128:131], v3
+; GCN-NEXT: ds_read_b128 a[132:135], v3 offset:16
+; GCN-NEXT: ds_read_b128 a[136:139], v3 offset:32
+; GCN-NEXT: ds_read_b128 a[140:143], v3 offset:48
+; GCN-NEXT: v_add_u32_e32 v4, 0x6000, v3
+; GCN-NEXT: ds_read_b128 a[28:31], v3 offset:8304
+; GCN-NEXT: ds_read_b128 a[24:27], v3 offset:8288
+; GCN-NEXT: ds_read_b128 a[20:23], v3 offset:8272
+; GCN-NEXT: ds_read_b128 a[16:19], v3 offset:8256
+; GCN-NEXT: ds_read_b128 a[12:15], v3 offset:8240
+; GCN-NEXT: ds_read_b128 a[8:11], v3 offset:8224
+; GCN-NEXT: ds_read_b128 a[4:7], v3 offset:8208
+; GCN-NEXT: ds_read_b128 a[0:3], v3 offset:8192
+; GCN-NEXT: ds_read_b128 a[124:127], v3 offset:24688
+; GCN-NEXT: ds_read_b128 a[120:123], v3 offset:24672
+; GCN-NEXT: ds_read_b128 a[116:119], v3 offset:24656
+; GCN-NEXT: ds_read_b128 a[112:115], v3 offset:24640
+; GCN-NEXT: ds_read_b128 a[108:111], v3 offset:24624
+; GCN-NEXT: ds_read_b128 a[104:107], v3 offset:24608
+; GCN-NEXT: ds_read_b128 a[100:103], v3 offset:24592
+; GCN-NEXT: ds_read_b128 a[96:99], v3 offset:24576
+; GCN-NEXT: ds_read_b128 a[92:95], v3 offset:49264
+; GCN-NEXT: ds_read_b128 a[88:91], v3 offset:49248
+; GCN-NEXT: ds_read_b128 a[84:87], v3 offset:49232
+; GCN-NEXT: ds_read_b128 a[80:83], v3 offset:49216
+; GCN-NEXT: ds_read_b128 a[76:79], v3 offset:49200
+; GCN-NEXT: ds_read_b128 a[72:75], v3 offset:49184
+; GCN-NEXT: ds_read_b128 a[68:71], v3 offset:49168
+; GCN-NEXT: ds_read_b128 a[64:67], v3 offset:49152
+; GCN-NEXT: ds_read_b128 a[60:63], v4 offset:57456
+; GCN-NEXT: ds_read_b128 a[56:59], v4 offset:57440
+; GCN-NEXT: ds_read_b128 a[52:55], v4 offset:57424
+; GCN-NEXT: ds_read_b128 a[48:51], v4 offset:57408
+; GCN-NEXT: ds_read_b128 a[32:35], v4 offset:57344
+; GCN-NEXT: ds_read_b128 a[36:39], v4 offset:57360
+; GCN-NEXT: ds_read_b128 a[40:43], v4 offset:57376
+; GCN-NEXT: ds_read_b128 a[44:47], v4 offset:57392
+; GCN-NEXT: s_waitcnt lgkmcnt(14)
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v2, v1, a[128:159]
 ; GCN-NEXT: v_add_u32_e32 v0, s1, v0
 ; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(40) SyncID(0)
-; GCN-NEXT: s_waitcnt lgkmcnt(14)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v1, v2, a[128:159]
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31]
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v1, v2, a[96:127]
 ; GCN-NEXT: s_waitcnt lgkmcnt(8)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v1, v2, a[64:95]
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v2, v1, a[64:95]
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v2, v1, a[96:127]
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v1, a[0:31]
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v1, v2, a[32:63]
-; GCN-NEXT: s_nop 12
+; GCN-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v2, v1, a[32:63]
+; GCN-NEXT: s_nop 11
 ; GCN-NEXT: ds_write_b128 v0, a[156:159] offset:112
 ; GCN-NEXT: ds_write_b128 v0, a[152:155] offset:96
 ; GCN-NEXT: ds_write_b128 v0, a[148:151] offset:80
@@ -729,62 +729,62 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(ptr ad
 ; EXACTCUTOFF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; EXACTCUTOFF-NEXT: v_lshlrev_b32_e32 v0, 7, v0
 ; EXACTCUTOFF-NEXT: v_and_b32_e32 v0, 0x1ff80, v0
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 1.0
+; EXACTCUTOFF-NEXT: v_mov_b32_e32 v1, 2.0
 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
-; EXACTCUTOFF-NEXT: v_add_u32_e32 v1, s0, v0
-; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v1 offset:112
-; EXACTCUTOFF-NEXT: ds_read_b128 a[152:155], v1 offset:96
-; EXACTCUTOFF-NEXT: ds_read_b128 a[148:151], v1 offset:80
-; EXACTCUTOFF-NEXT: ds_read_b128 a[144:147], v1 offset:64
-; EXACTCUTOFF-NEXT: ds_read_b128 a[128:131], v1
-; EXACTCUTOFF-NEXT: ds_read_b128 a[132:135], v1 offset:16
-; EXACTCUTOFF-NEXT: ds_read_b128 a[136:139], v1 offset:32
-; EXACTCUTOFF-NEXT: ds_read_b128 a[140:143], v1 offset:48
-; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v1 offset:8304
-; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v1 offset:8288
-; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v1 offset:8272
-; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v1 offset:8256
-; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v1 offset:8240
-; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v1 offset:8224
-; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v1 offset:8208
-; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v1 offset:8192
-; EXACTCUTOFF-NEXT: v_add_u32_e32 v2, 0x6000, v1
-; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v1 offset:24688
-; EXACTCUTOFF-NEXT: ds_read_b128 a[120:123], v1 offset:24672
-; EXACTCUTOFF-NEXT: ds_read_b128 a[116:119], v1 offset:24656
-; EXACTCUTOFF-NEXT: ds_read_b128 a[112:115], v1 offset:24640
-; EXACTCUTOFF-NEXT: ds_read_b128 a[108:111], v1 offset:24624
-; EXACTCUTOFF-NEXT: ds_read_b128 a[104:107], v1 offset:24608
-; EXACTCUTOFF-NEXT: ds_read_b128 a[100:103], v1 offset:24592
-; EXACTCUTOFF-NEXT: ds_read_b128 a[96:99], v1 offset:24576
-; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v1 offset:49264
-; EXACTCUTOFF-NEXT: ds_read_b128 a[88:91], v1 offset:49248
-; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v1 offset:49232
-; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v1 offset:49216
-; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v1 offset:49200
-; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v1 offset:49184
-; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v1 offset:49168
-; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v1 offset:49152
-; EXACTCUTOFF-NEXT: v_mov_b32_e32 v1, 1.0
-; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v2 offset:57456
-; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v2 offset:57440
-; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v2 offset:57424
-; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v2 offset:57408
-; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v2 offset:57344
-; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v2 offset:57360
-; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v2 offset:57376
-; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v2 offset:57392
-; EXACTCUTOFF-NEXT: v_mov_b32_e32 v2, 2.0
+; EXACTCUTOFF-NEXT: v_add_u32_e32 v3, s0, v0
+; EXACTCUTOFF-NEXT: ds_read_b128 a[156:159], v3 offset:112
+; EXACTCUTOFF-NEXT: ds_read_b128 a[152:155], v3 offset:96
+; EXACTCUTOFF-NEXT: ds_read_b128 a[148:151], v3 offset:80
+; EXACTCUTOFF-NEXT: ds_read_b128 a[144:147], v3 offset:64
+; EXACTCUTOFF-NEXT: ds_read_b128 a[128:131], v3
+; EXACTCUTOFF-NEXT: ds_read_b128 a[132:135], v3 offset:16
+; EXACTCUTOFF-NEXT: ds_read_b128 a[136:139], v3 offset:32
+; EXACTCUTOFF-NEXT: ds_read_b128 a[140:143], v3 offset:48
+; EXACTCUTOFF-NEXT: v_add_u32_e32 v4, 0x6000, v3
+; EXACTCUTOFF-NEXT: ds_read_b128 a[28:31], v3 offset:8304
+; EXACTCUTOFF-NEXT: ds_read_b128 a[24:27], v3 offset:8288
+; EXACTCUTOFF-NEXT: ds_read_b128 a[20:23], v3 offset:8272
+; EXACTCUTOFF-NEXT: ds_read_b128 a[16:19], v3 offset:8256
+; EXACTCUTOFF-NEXT: ds_read_b128 a[12:15], v3 offset:8240
+; EXACTCUTOFF-NEXT: ds_read_b128 a[8:11], v3 offset:8224
+; EXACTCUTOFF-NEXT: ds_read_b128 a[4:7], v3 offset:8208
+; EXACTCUTOFF-NEXT: ds_read_b128 a[0:3], v3 offset:8192
+; EXACTCUTOFF-NEXT: ds_read_b128 a[124:127], v3 offset:24688
+; EXACTCUTOFF-NEXT: ds_read_b128 a[120:123], v3 offset:24672
+; EXACTCUTOFF-NEXT: ds_read_b128 a[116:119], v3 offset:24656
+; EXACTCUTOFF-NEXT: ds_read_b128 a[112:115], v3 offset:24640
+; EXACTCUTOFF-NEXT: ds_read_b128 a[108:111], v3 offset:24624
+; EXACTCUTOFF-NEXT: ds_read_b128 a[104:107], v3 offset:24608
+; EXACTCUTOFF-NEXT: ds_read_b128 a[100:103], v3 offset:24592
+; EXACTCUTOFF-NEXT: ds_read_b128 a[96:99], v3 offset:24576
+; EXACTCUTOFF-NEXT: ds_read_b128 a[92:95], v3 offset:49264
+; EXACTCUTOFF-NEXT: ds_read_b128 a[88:91], v3 offset:49248
+; EXACTCUTOFF-NEXT: ds_read_b128 a[84:87], v3 offset:49232
+; EXACTCUTOFF-NEXT: ds_read_b128 a[80:83], v3 offset:49216
+; EXACTCUTOFF-NEXT: ds_read_b128 a[76:79], v3 offset:49200
+; EXACTCUTOFF-NEXT: ds_read_b128 a[72:75], v3 offset:49184
+; EXACTCUTOFF-NEXT: ds_read_b128 a[68:71], v3 offset:49168
+; EXACTCUTOFF-NEXT: ds_read_b128 a[64:67], v3 offset:49152
+; EXACTCUTOFF-NEXT: ds_read_b128 a[60:63], v4 offset:57456
+; EXACTCUTOFF-NEXT: ds_read_b128 a[56:59], v4 offset:57440
+; EXACTCUTOFF-NEXT: ds_read_b128 a[52:55], v4 offset:57424
+; EXACTCUTOFF-NEXT: ds_read_b128 a[48:51], v4 offset:57408
+; EXACTCUTOFF-NEXT: ds_read_b128 a[32:35], v4 offset:57344
+; EXACTCUTOFF-NEXT: ds_read_b128 a[36:39], v4 offset:57360
+; EXACTCUTOFF-NEXT: ds_read_b128 a[40:43], v4 offset:57376
+; EXACTCUTOFF-NEXT: ds_read_b128 a[44:47], v4 offset:57392
+; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(14)
+; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v2, v1, a[128:159]
 ; EXACTCUTOFF-NEXT: v_add_u32_e32 v0, s1, v0
 ; EXACTCUTOFF-NEXT: ; sched_group_barrier mask(0x00000100) size(40) SyncID(0)
-; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(14)
-; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[128:159], v1, v2, a[128:159]
-; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v1, v2, a[0:31]
-; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v1, v2, a[96:127]
 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(8)
-; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v1, v2, a[64:95]
+; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[64:95], v2, v1, a[64:95]
+; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[96:127], v2, v1, a[96:127]
+; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v2, v1, a[0:31]
 ; EXACTCUTOFF-NEXT: s_waitcnt lgkmcnt(0)
-; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v1, v2, a[32:63]
-; EXACTCUTOFF-NEXT: s_nop 12
+; EXACTCUTOFF-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v2, v1, a[32:63]
+; EXACTCUTOFF-NEXT: s_nop 11
 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[156:159] offset:112
 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[152:155] offset:96
 ; EXACTCUTOFF-NEXT: ds_write_b128 v0, a[148:151] offset:80
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll b/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll
index 63e9eef..66b7958 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll
@@ -315,7 +315,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align16(ptr ad
 ; FUNC-LABEL: {{^}}test_memcpy_const_string_align4:
 ; SI: s_getpc_b64
-; SI: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, hello.align4@rel32@lo+4
+; SI: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, .Lhello.align4@rel32@lo+4
 ; SI: s_addc_u32
 ; SI-DAG: s_load_dwordx8
 ; SI-DAG: s_load_dwordx2
diff --git a/llvm/test/CodeGen/AMDGPU/minmax.ll b/llvm/test/CodeGen/AMDGPU/minmax.ll
index 56f9c5d..d578d2e 100644
--- a/llvm/test/CodeGen/AMDGPU/minmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/minmax.ll
@@ -612,10 +612,10 @@ define void @test_med3_f32(ptr addrspace(1) %arg, float %x, float %y, float %z)
 ; GFX1250-NEXT: v_med3_num_f32 v2, v2, v3, v4
 ; GFX1250-NEXT: global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT: s_set_pc_i64 s[30:31]
- %tmp0 = call float @llvm.minnum.f32(float %x, float %y)
- %tmp1 = call float @llvm.maxnum.f32(float %x, float %y)
- %tmp2 = call float @llvm.minnum.f32(float %tmp1, float %z)
- %tmp3 = call float @llvm.maxnum.f32(float %tmp0, float %tmp2)
+ %tmp0 = call nnan float @llvm.minnum.f32(float %x, float %y)
+ %tmp1 = call nnan float @llvm.maxnum.f32(float %x, float %y)
+ %tmp2 = call nnan float @llvm.minnum.f32(float %tmp1, float %z)
+ %tmp3 = call nnan float @llvm.maxnum.f32(float %tmp0, float %tmp2)
 store float %tmp3, ptr addrspace(1) %arg
 ret void
 }
@@ -646,10 +646,10 @@ define void @test_med3_minimumnum_maximumnum_f32(ptr addrspace(1) %arg, float %x
 ; GFX1250-NEXT: v_med3_num_f32 v2, v2, v3, v4
 ; GFX1250-NEXT: global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT: s_set_pc_i64 s[30:31]
- %tmp0 = call float @llvm.minimumnum.f32(float %x, float %y)
- %tmp1 = call float @llvm.maximumnum.f32(float %x, float %y)
- %tmp2 = call float @llvm.minimumnum.f32(float %tmp1, float %z)
- %tmp3 = call float @llvm.maximumnum.f32(float %tmp0, float %tmp2)
+ %tmp0 = call nnan float @llvm.minimumnum.f32(float %x, float %y)
+ %tmp1 = call nnan float @llvm.maximumnum.f32(float %x, float %y)
+ %tmp2 = call nnan float @llvm.minimumnum.f32(float %tmp1, float %z)
+ %tmp3 = call nnan float @llvm.maximumnum.f32(float %tmp0, float %tmp2)
 store float %tmp3, ptr addrspace(1) %arg
 ret void
 }
@@ -1280,10 +1280,10 @@ define void @test_med3_f16(ptr addrspace(1) %arg, half %x, half %y, half %z) #0
 ; GISEL-GFX1250-FAKE16-NEXT: v_med3_num_f16 v2, v2, v3, v4
 ; GISEL-GFX1250-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
 ; GISEL-GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
- %tmp0 = call half @llvm.minnum.f16(half %x, half %y)
- %tmp1 = call half @llvm.maxnum.f16(half %x, half %y)
- %tmp2 = call half @llvm.minnum.f16(half %tmp1, half %z)
- %tmp3 = call half @llvm.maxnum.f16(half %tmp0, half %tmp2)
+ %tmp0 = call nnan half @llvm.minnum.f16(half %x, half %y)
+ %tmp1 = call nnan half @llvm.maxnum.f16(half %x, half %y)
+ %tmp2 = call nnan half @llvm.minnum.f16(half %tmp1, half %z)
+ %tmp3 = call nnan half @llvm.maxnum.f16(half %tmp0, half %tmp2)
 store half %tmp3, ptr addrspace(1) %arg
 ret void
 }
diff --git a/llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll b/llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll
index 5ff2d82..2509497 100644
--- a/llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/naked-fn-with-frame-pointer.ll
@@ -5,8 +5,8 @@ declare dso_local void @main()
 define dso_local void @naked() naked "frame-pointer"="all" {
 ; CHECK-LABEL: naked:
-; CHECK: naked$local:
-; CHECK-NEXT: .type naked$local,@function
+; CHECK: .Lnaked$local:
+; CHECK-NEXT: .type .Lnaked$local,@function
 ; CHECK-NEXT: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT: s_getpc_b64 s[16:17]
@@ -19,8 +19,8 @@ define dso_local void @naked() naked "frame-pointer"="all" {
 define dso_local void @normal() "frame-pointer"="all" {
 ; CHECK-LABEL: normal:
-; CHECK: normal$local:
-; CHECK-NEXT: .type normal$local,@function
+; CHECK: .Lnormal$local:
+; CHECK-NEXT: .type .Lnormal$local,@function
 ; CHECK-NEXT: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT: s_mov_b32 s16, s33
diff --git a/llvm/test/CodeGen/AMDGPU/readsteadycounter.ll b/llvm/test/CodeGen/AMDGPU/readsteadycounter.ll
index ddbae64..a95d8c7 100644
--- a/llvm/test/CodeGen/AMDGPU/readsteadycounter.ll
+++ b/llvm/test/CodeGen/AMDGPU/readsteadycounter.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck %s -check-prefixes=GCN,GFX700
 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefixes=GCN,GFX900
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefixes=GCN,GFX900
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefixes=GCN,GFX900
 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GCN,GFX1100
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GCN,GFX1100
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GCN,GFX1100
 declare i64 @llvm.readsteadycounter() #0
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
index 9a23788..8803f3a 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
@@ -367,77 +367,76 @@ bb:
 define amdgpu_kernel void @illegal_mfma_after_rewrite() #1 {
 ; CHECK-LABEL: illegal_mfma_after_rewrite:
 ; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_mov_b32 s1, s0
-; CHECK-NEXT: v_mov_b64_e32 v[28:29], s[0:1]
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: s_mov_b32 s5, s4
+; CHECK-NEXT: v_mov_b64_e32 v[26:27], s[4:5]
 ; CHECK-NEXT: ;;#ASMSTART
 ; CHECK-NEXT: ; def s[0:3]
 ; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[16:19]
+; CHECK-NEXT: ;;#ASMEND
 ; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: v_mov_b64_e32 v[6:7], s[2:3]
-; CHECK-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; CHECK-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; CHECK-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
 ; CHECK-NEXT: s_mov_b32 s0, 0x3c003c00
 ; CHECK-NEXT: s_mov_b32 s1, s0
-; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[0:1]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[4:7], v[26:27], v[26:27], v[0:3]
+; CHECK-NEXT: v_mov_b64_e32 v[28:29], s[0:1]
 ; CHECK-NEXT: s_mov_b32 s0, 0x7e007e00
 ; CHECK-NEXT: s_mov_b32 s1, s0
-; CHECK-NEXT: v_accvgpr_write_b32 a0, s0
-; CHECK-NEXT: v_accvgpr_write_b32 a1, s1
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[28:29], v[28:29], v[4:7]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[30:31], v[4:7]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[12:15], v[28:29], a[0:1], v[4:7]
-; CHECK-NEXT: s_nop 2
-; CHECK-NEXT: v_mov_b32_e32 v4, 0x7fc00000
-; CHECK-NEXT: v_mov_b32_e32 v5, v4
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v4
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[28:29], v[8:11]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[4:7], v[26:27], v[26:27], v[4:7]
+; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[0:1]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[6:9], v[26:27], v[28:29], v[0:3]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[6:9], v[26:27], v[26:27], v[6:9]
+; CHECK-NEXT: s_nop 3
+; CHECK-NEXT: v_cvt_f16_f32_e32 v24, v4
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[12:15], v[26:27], v[30:31], v[0:3]
 ; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[16:19], v[28:29], v[28:29], v[4:7]
-; CHECK-NEXT: ;;#ASMSTART
-; CHECK-NEXT: ; def v[4:7]
-; CHECK-NEXT: ;;#ASMEND
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[16:19], v[28:29], v[28:29], v[16:19]
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[24:27], v[28:29], v[30:31], v[4:7]
-; CHECK-NEXT: s_nop 5
-; CHECK-NEXT: v_cvt_f16_f32_e32 v17, v8
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[28:29], v[12:15]
-; CHECK-NEXT: s_nop 2
-; CHECK-NEXT: v_mov_b64_e32 v[12:13], 0
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[28:29], v[28:29], v[0:3]
-; CHECK-NEXT: global_store_short v[12:13], v17, off
+; CHECK-NEXT: v_mov_b32_e32 v8, 0x7fc00000
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_mov_b32_e32 v11, v8
+; CHECK-NEXT: v_cvt_f16_f32_e32 v2, v6
+; CHECK-NEXT: v_mov_b64_e32 v[0:1], 0
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[26:27], v[26:27], v[8:11]
+; CHECK-NEXT: global_store_short v[0:1], v2, off
 ; CHECK-NEXT: buffer_wbl2 sc0 sc1
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: v_cvt_f16_f32_e32 v9, v16
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[20:23], v[28:29], v[28:29], v[4:7]
-; CHECK-NEXT: global_store_short v[12:13], v9, off
-; CHECK-NEXT: v_cvt_f16_f32_e32 v1, v8
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[8:11], v[28:29], v[28:29], v[24:27]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[2:5], v[26:27], v[28:29], v[16:19]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[6:9], v[26:27], v[26:27], v[8:11]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[20:23], v[26:27], v[26:27], v[16:19]
+; CHECK-NEXT: s_nop 5
+; CHECK-NEXT: v_cvt_f16_f32_e32 v10, v6
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[6:9], v[26:27], v[26:27], v[12:15]
+; CHECK-NEXT: global_store_short v[0:1], v10, off
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[2:5], v[26:27], v[26:27], v[2:5]
 ; CHECK-NEXT: buffer_wbl2 sc0 sc1
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: v_cvt_f16_f32_e32 v14, v0
-; CHECK-NEXT: global_store_short v[12:13], v1, off
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[4:7], v[28:29], v[28:29], v[20:23]
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_cvt_f16_f32_e32 v6, v6
+; CHECK-NEXT: global_store_short v[0:1], v6, off
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[16:19], v[26:27], v[26:27], v[20:23]
 ; CHECK-NEXT: buffer_wbl2 sc0 sc1
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: global_store_short v[12:13], v14, off
+; CHECK-NEXT: global_store_short v[0:1], v24, off
 ; CHECK-NEXT: buffer_wbl2 sc0 sc1
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: buffer_inv sc0 sc1
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], v[30:31], v[28:29], v[8:11]
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[2:5], v[28:29], v[26:27], v[2:5]
 ; CHECK-NEXT: s_nop 6
-; CHECK-NEXT: v_cvt_f16_f32_e32 v8, v0
-; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[0:3], a[0:1], v[28:29], v[4:7]
-; CHECK-NEXT: global_store_short v[12:13], v8, off
+; CHECK-NEXT: v_cvt_f16_f32_e32 v6, v2
+; CHECK-NEXT: v_mfma_f32_16x16x16_f16 v[2:5], v[30:31], v[26:27], v[16:19]
+; CHECK-NEXT: global_store_short v[0:1], v6, off
 ; CHECK-NEXT: buffer_wbl2 sc0 sc1
 ; CHECK-NEXT: s_waitcnt vmcnt(0)
 ; CHECK-NEXT: buffer_inv sc0 sc1
 ; CHECK-NEXT: s_nop 2
-; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0
-; CHECK-NEXT: global_store_short v[12:13], v0, off
+; CHECK-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CHECK-NEXT: global_store_short v[0:1], v2, off
 ; CHECK-NEXT: s_endpgm
 entry:
 %k0 = call <4 x float> asm sideeffect "; def $0", "=s"()
"=s"() @@ -546,100 +545,14 @@ define void @test_rewrite_mfma_subreg_insert2(double %arg0, double %arg1, ptr ad define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class(ptr addrspace(1) %arg0, ptr addrspace(1) %arg1) #0 { ; CHECK-LABEL: test_rewrite_mfma_direct_copy_from_agpr_class: ; CHECK: ; %bb.0: +; CHECK-NEXT: v_accvgpr_write_b32 a34, 2.0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 ; CHECK-NEXT: ;;#ASMSTART ; CHECK-NEXT: ; def a[0:31] ; CHECK-NEXT: ;;#ASMEND ; CHECK-NEXT: v_accvgpr_write_b32 a32, v0 -; CHECK-NEXT: v_accvgpr_read_b32 v63, a31 -; CHECK-NEXT: v_accvgpr_read_b32 v62, a30 -; CHECK-NEXT: v_accvgpr_read_b32 v61, a29 -; CHECK-NEXT: v_accvgpr_read_b32 v60, a28 -; CHECK-NEXT: v_accvgpr_read_b32 v59, a27 -; CHECK-NEXT: v_accvgpr_read_b32 v58, a26 -; CHECK-NEXT: v_accvgpr_read_b32 v57, a25 -; CHECK-NEXT: v_accvgpr_read_b32 v56, a24 -; CHECK-NEXT: v_accvgpr_read_b32 v55, a23 -; CHECK-NEXT: v_accvgpr_read_b32 v54, a22 -; CHECK-NEXT: v_accvgpr_read_b32 v53, a21 -; CHECK-NEXT: v_accvgpr_read_b32 v52, a20 -; CHECK-NEXT: v_accvgpr_read_b32 v51, a19 -; CHECK-NEXT: v_accvgpr_read_b32 v50, a18 -; CHECK-NEXT: v_accvgpr_read_b32 v49, a17 -; CHECK-NEXT: v_accvgpr_read_b32 v48, a16 -; CHECK-NEXT: v_accvgpr_read_b32 v47, a15 -; CHECK-NEXT: v_accvgpr_read_b32 v46, a14 -; CHECK-NEXT: v_accvgpr_read_b32 v45, a13 -; CHECK-NEXT: v_accvgpr_read_b32 v44, a12 -; CHECK-NEXT: v_accvgpr_read_b32 v43, a11 -; CHECK-NEXT: v_accvgpr_read_b32 v42, a10 -; CHECK-NEXT: v_accvgpr_read_b32 v41, a9 -; CHECK-NEXT: v_accvgpr_read_b32 v40, a8 -; CHECK-NEXT: v_accvgpr_read_b32 v39, a7 -; CHECK-NEXT: v_accvgpr_read_b32 v38, a6 -; CHECK-NEXT: v_accvgpr_read_b32 v37, a5 -; CHECK-NEXT: v_accvgpr_read_b32 v36, a4 -; CHECK-NEXT: v_accvgpr_read_b32 v35, a3 -; CHECK-NEXT: v_accvgpr_read_b32 v34, a2 -; CHECK-NEXT: v_accvgpr_read_b32 v33, a1 -; CHECK-NEXT: v_accvgpr_read_b32 v32, a0 -; CHECK-NEXT: v_accvgpr_write_b32 a0, 2.0 -; CHECK-NEXT: v_accvgpr_write_b32 a1, 4.0 -; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 -; CHECK-NEXT: s_nop 0 -; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], a0, a1, v[32:63] -; CHECK-NEXT: v_accvgpr_write_b32 a0, v32 -; CHECK-NEXT: v_accvgpr_write_b32 a1, v33 -; CHECK-NEXT: v_accvgpr_write_b32 a2, v34 -; CHECK-NEXT: v_accvgpr_write_b32 a3, v35 -; CHECK-NEXT: v_accvgpr_write_b32 a4, v36 -; CHECK-NEXT: v_accvgpr_write_b32 a5, v37 -; CHECK-NEXT: v_accvgpr_write_b32 a6, v38 -; CHECK-NEXT: v_accvgpr_write_b32 a7, v39 -; CHECK-NEXT: v_accvgpr_write_b32 a8, v40 -; CHECK-NEXT: v_accvgpr_write_b32 a9, v41 -; CHECK-NEXT: v_accvgpr_write_b32 a10, v42 -; CHECK-NEXT: v_accvgpr_write_b32 a11, v43 -; CHECK-NEXT: v_accvgpr_write_b32 a12, v44 -; CHECK-NEXT: v_accvgpr_write_b32 a13, v45 -; CHECK-NEXT: v_accvgpr_write_b32 a14, v46 -; CHECK-NEXT: v_accvgpr_write_b32 a15, v47 -; CHECK-NEXT: v_accvgpr_write_b32 a16, v48 -; CHECK-NEXT: v_accvgpr_write_b32 a17, v49 -; CHECK-NEXT: v_accvgpr_write_b32 a18, v50 -; CHECK-NEXT: v_accvgpr_write_b32 a19, v51 -; CHECK-NEXT: v_accvgpr_write_b32 a20, v52 -; CHECK-NEXT: v_accvgpr_write_b32 a21, v53 -; CHECK-NEXT: v_accvgpr_write_b32 a22, v54 -; CHECK-NEXT: v_accvgpr_write_b32 a23, v55 -; CHECK-NEXT: v_accvgpr_write_b32 a24, v56 -; CHECK-NEXT: v_accvgpr_write_b32 a25, v57 -; CHECK-NEXT: v_accvgpr_write_b32 a26, v58 -; CHECK-NEXT: v_accvgpr_write_b32 a27, v59 -; CHECK-NEXT: v_accvgpr_write_b32 a28, v60 -; CHECK-NEXT: v_accvgpr_write_b32 a29, v61 -; CHECK-NEXT: v_accvgpr_write_b32 a30, v62 -; CHECK-NEXT: v_accvgpr_write_b32 a31, 
v63 -; CHECK-NEXT: v_mov_b32_e32 v33, 0x41000000 -; CHECK-NEXT: v_mov_b32_e32 v34, 0x41800000 -; CHECK-NEXT: v_accvgpr_read_b32 v32, a32 -; CHECK-NEXT: v_and_b32_e32 v32, 0x3ff, v32 -; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v33, v34, a[0:31] -; CHECK-NEXT: v_lshlrev_b32_e32 v32, 7, v32 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 -; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 -; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 -; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 -; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 -; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 -; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 -; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] -; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: v_accvgpr_read_b32 v0, a0 -; CHECK-NEXT: v_accvgpr_read_b32 v24, a24 -; CHECK-NEXT: v_accvgpr_read_b32 v25, a25 -; CHECK-NEXT: v_accvgpr_read_b32 v26, a26 -; CHECK-NEXT: v_accvgpr_read_b32 v27, a27 ; CHECK-NEXT: v_accvgpr_read_b32 v1, a1 ; CHECK-NEXT: v_accvgpr_read_b32 v2, a2 ; CHECK-NEXT: v_accvgpr_read_b32 v3, a3 @@ -663,18 +576,60 @@ define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class(ptr add ; CHECK-NEXT: v_accvgpr_read_b32 v21, a21 ; CHECK-NEXT: v_accvgpr_read_b32 v22, a22 ; CHECK-NEXT: v_accvgpr_read_b32 v23, a23 +; CHECK-NEXT: v_accvgpr_read_b32 v24, a24 +; CHECK-NEXT: v_accvgpr_read_b32 v25, a25 +; CHECK-NEXT: v_accvgpr_read_b32 v26, a26 +; CHECK-NEXT: v_accvgpr_read_b32 v27, a27 ; CHECK-NEXT: v_accvgpr_read_b32 v28, a28 ; CHECK-NEXT: v_accvgpr_read_b32 v29, a29 ; CHECK-NEXT: v_accvgpr_read_b32 v30, a30 ; CHECK-NEXT: v_accvgpr_read_b32 v31, a31 -; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[2:3] offset:96 -; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[2:3] offset:112 -; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[2:3] offset:64 -; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[2:3] offset:80 -; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[2:3] offset:32 -; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[2:3] offset:48 -; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[2:3] -; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[2:3] offset:16 +; CHECK-NEXT: v_accvgpr_write_b32 a33, 4.0 +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[32:63], a34, a33, v[0:31] +; CHECK-NEXT: v_mov_b32_e32 v1, 0x41000000 +; CHECK-NEXT: v_accvgpr_read_b32 v0, a32 +; CHECK-NEXT: s_nop 15 +; CHECK-NEXT: v_mov_b64_e32 v[2:3], v[32:33] +; CHECK-NEXT: v_mov_b64_e32 v[4:5], v[34:35] +; CHECK-NEXT: v_mov_b64_e32 v[6:7], v[36:37] +; CHECK-NEXT: v_mov_b64_e32 v[8:9], v[38:39] +; CHECK-NEXT: v_mov_b64_e32 v[10:11], v[40:41] +; CHECK-NEXT: v_mov_b64_e32 v[12:13], v[42:43] +; CHECK-NEXT: v_mov_b64_e32 v[14:15], v[44:45] +; CHECK-NEXT: v_mov_b64_e32 v[16:17], v[46:47] +; CHECK-NEXT: v_mov_b64_e32 v[18:19], v[48:49] +; CHECK-NEXT: v_mov_b64_e32 v[20:21], v[50:51] +; CHECK-NEXT: v_mov_b64_e32 v[22:23], v[52:53] +; CHECK-NEXT: v_mov_b64_e32 v[24:25], v[54:55] +; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[56:57] +; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[58:59] +; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[60:61] +; CHECK-NEXT: v_mov_b64_e32 v[32:33], v[62:63] +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_store_dwordx4 v0, v[30:33], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v0, v[26:29], s[0:1] offset:96 +; 
CHECK-NEXT: global_store_dwordx4 v0, v[22:25], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v0, v[18:21], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v0, v[14:17], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v0, v[10:13], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v0, v[6:9], s[0:1] offset:16 +; CHECK-NEXT: global_store_dwordx4 v0, v[2:5], s[0:1] +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_mov_b32_e32 v2, 0x41800000 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v1, v2, a[0:31] +; CHECK-NEXT: s_nop 15 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[2:3] offset:96 +; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[2:3] offset:112 +; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[2:3] offset:64 +; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[2:3] offset:80 +; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[2:3] offset:32 +; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[2:3] offset:48 +; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[2:3] +; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[2:3] offset:16 ; CHECK-NEXT: s_endpgm %src2 = call <32 x float> asm sideeffect "; def $0", "=a"() %mai0 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %src2, i32 0, i32 0, i32 0) diff --git a/llvm/test/CodeGen/AMDGPU/sched.group.classification.mir b/llvm/test/CodeGen/AMDGPU/sched.group.classification.mir new file mode 100644 index 0000000..a4aad57 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/sched.group.classification.mir @@ -0,0 +1,59 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 -run-pass=machine-scheduler -o - %s | FileCheck %s + +--- +name: buffer_load_lds_not_valu +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; CHECK-LABEL: name: buffer_load_lds_not_valu + ; CHECK: liveins: $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $exec = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[DEF2]], [[DEF3]], implicit $exec + ; CHECK-NEXT: [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[DEF3]], [[V_ADD_U32_e32_]], implicit $exec + ; CHECK-NEXT: $m0 = S_MOV_B32 0 + ; CHECK-NEXT: BUFFER_LOAD_DWORDX4_LDS_OFFEN [[DEF]], [[DEF1]], 0, 0, 0, 0, implicit $exec, implicit $m0 + ; CHECK-NEXT: [[V_ADD_U32_e32_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_ADD_U32_e32_]], [[V_ADD_U32_e32_1]], implicit $exec + ; CHECK-NEXT: [[V_ADD_U32_e32_3:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_ADD_U32_e32_1]], [[V_ADD_U32_e32_2]], implicit $exec + ; CHECK-NEXT: $m0 = S_MOV_B32 1 + ; CHECK-NEXT: BUFFER_LOAD_DWORDX4_LDS_OFFEN [[DEF]], [[DEF1]], 0, 0, 0, 0, implicit $exec, implicit $m0 + ; CHECK-NEXT: [[V_ADD_U32_e32_4:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_ADD_U32_e32_2]], [[V_ADD_U32_e32_3]], implicit $exec + ; CHECK-NEXT: [[V_ADD_U32_e32_5:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_ADD_U32_e32_3]], [[V_ADD_U32_e32_4]], implicit $exec + ; CHECK-NEXT: [[V_ADD_U32_e32_6:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_ADD_U32_e32_4]], [[V_ADD_U32_e32_5]], implicit $exec + ; CHECK-NEXT: dead [[V_ADD_U32_e32_7:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_ADD_U32_e32_5]], [[V_ADD_U32_e32_6]], implicit $exec + ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 2, 0 + ; CHECK-NEXT: 
SCHED_GROUP_BARRIER 4, 1, 0 + ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 2, 0 + ; CHECK-NEXT: SCHED_GROUP_BARRIER 4, 1, 0 + ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 4, 0 + ; CHECK-NEXT: S_ENDPGM 0 + $exec = IMPLICIT_DEF + %0:vgpr_32 = IMPLICIT_DEF + %1:sgpr_128 = IMPLICIT_DEF + %2:vgpr_32 = IMPLICIT_DEF + %3:vgpr_32 = IMPLICIT_DEF + %4:vgpr_32 = V_ADD_U32_e32 %2, %3, implicit $exec + %5:vgpr_32 = V_ADD_U32_e32 %3, %4, implicit $exec + $m0 = S_MOV_B32 0 + BUFFER_LOAD_DWORDX4_LDS_OFFEN %0, %1, 0, 0, 0, 0, implicit $exec, implicit $m0 + $m0 = S_MOV_B32 1 + BUFFER_LOAD_DWORDX4_LDS_OFFEN %0, %1, 0, 0, 0, 0, implicit $exec, implicit $m0 + %6:vgpr_32 = V_ADD_U32_e32 %4, %5, implicit $exec + %7:vgpr_32 = V_ADD_U32_e32 %5, %6, implicit $exec + %8:vgpr_32 = V_ADD_U32_e32 %6, %7, implicit $exec + %9:vgpr_32 = V_ADD_U32_e32 %7, %8, implicit $exec + %10:vgpr_32 = V_ADD_U32_e32 %8, %9, implicit $exec + %11:vgpr_32 = V_ADD_U32_e32 %9, %10, implicit $exec + SCHED_GROUP_BARRIER 2, 2, 0 + SCHED_GROUP_BARRIER 4, 1 ,0 + SCHED_GROUP_BARRIER 2, 2, 0 + SCHED_GROUP_BARRIER 4, 1 ,0 + SCHED_GROUP_BARRIER 2, 4, 0 + S_ENDPGM 0 +... diff --git a/llvm/test/CodeGen/AMDGPU/schedule-pending-queue.mir b/llvm/test/CodeGen/AMDGPU/schedule-pending-queue.mir new file mode 100644 index 0000000..33b2f69 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/schedule-pending-queue.mir @@ -0,0 +1,32 @@ +# RUN: llc -march=amdgcn -mcpu=gfx908 -run-pass machine-scheduler --misched-prera-direction=topdown -verify-machineinstrs %s -o - -debug-only=machine-scheduler 2>&1 | FileCheck %s +# REQUIRES: asserts + +# Check that cycle counts are consistent with hazards. + +# CHECK: Cycle: 3 TopQ.A +# CHECK: hazard: SU(6) HWXDL[0]=9c, is later than CurrCycle = 3c +# CHECK-NOT: Cycle: 9 TopQ.A +# CHECK: Cycle: 83 TopQ.A +# CHECK: Checking pending node SU(6) +# CHECK: Move SU(6) into Available Q + +--- +name: pending_queue_ready_cycle +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr4_sgpr5 + + %2:sgpr_128 = IMPLICIT_DEF + %14:vgpr_32 = IMPLICIT_DEF + %15:vgpr_32 = IMPLICIT_DEF + %18:areg_512 = IMPLICIT_DEF + %18:areg_512 = V_MFMA_F32_16X16X1F32_mac_e64 %15, %14, %18, 0, 0, 0, implicit $mode, implicit $exec + %5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, implicit $exec + %18:areg_512 = V_MFMA_F32_16X16X1F32_mac_e64 %15, %14, %18, 0, 0, 0, implicit $mode, implicit $exec + undef %84.sub0:vreg_128_align2 = V_ADD_U32_e32 %5.sub0, %14, implicit $exec + %7:vreg_512 = COPY %18 + SCHED_BARRIER 0 + S_NOP 0, implicit %18, implicit %7, implicit %84 + S_ENDPGM 0 +... diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-phys-copy.mir b/llvm/test/CodeGen/AMDGPU/sgpr-phys-copy.mir index 9553fcc..f11fe4a 100644 --- a/llvm/test/CodeGen/AMDGPU/sgpr-phys-copy.mir +++ b/llvm/test/CodeGen/AMDGPU/sgpr-phys-copy.mir @@ -59,6 +59,15 @@ body: | ... --- +name: src_shared_base_to_vcc +body: | + bb.0: + ; GFX9-LABEL: name: src_shared_base_to_vcc + ; GFX9: $vcc = S_MOV_B64 $src_shared_base + $vcc = COPY $src_shared_base +... 
+ +--- name: sgpr96_aligned_src_dst body: | bb.0: diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir index c8fee5d..7cbe5de 100644 --- a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir +++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir @@ -119,9 +119,10 @@ body: | ; CHECK: [[R32:%[0-9]+]]:_(s32) = G_SUB [[COUNT]], [[BITDIFF]] %2(s16) = G_CTLZ %1 - ; CHECK: [[SHIFTEDR:%[0-9]+]]:_(s32) = G_SHL [[R32]], [[BITDIFF]] - ; CHECK: [[R:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDR]], [[BITDIFF]] - ; CHECK: $r0 = COPY [[R]] + ; LIBCALLS: [[SHIFTEDR:%[0-9]+]]:_(s32) = G_SHL [[R32]], [[BITDIFF]] + ; LIBCALLS: [[R:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDR]], [[BITDIFF]] + ; LIBCALLS: $r0 = COPY [[R]] + ; CLZ: $r0 = COPY [[R32]] %3(s32) = G_SEXT %2(s16) $r0 = COPY %3(s32) BX_RET 14, $noreg, implicit $r0 diff --git a/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll b/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll index a2d6ca9..972a470 100644 --- a/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll +++ b/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll @@ -27,7 +27,7 @@ entry: !1 = !{i64 0, !"_ZTSFivE.generalized"} !2 = !{i64 0, !"_ZTSFviE.generalized"} -; CHECK: .section .callgraph,"o",%progbits,.text +; CHECK: .section .llvm.callgraph,"o",%progbits,.text ;; Version ; CHECK-NEXT: .byte 0 ;; Flags -- Potential indirect target so LSB is set to 1. Other bits are 0. diff --git a/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll b/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll index bf5249e..ec8d5b8 100644 --- a/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll +++ b/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll @@ -1,8 +1,8 @@ ;; Test if temporary labels are generated for each indirect callsite. -;; Test if the .callgraph section contains the MD5 hash of callees' type (type id) +;; Test if the .llvm.callgraph section contains the MD5 hash of callees' type (type id) ;; is correctly paired with its corresponding temporary label generated for indirect ;; call sites annotated with !callee_type metadata. -;; Test if the .callgraph section contains unique direct callees. +;; Test if the .llvm.callgraph section contains unique direct callees. ; RUN: llc -mtriple=arm-unknown-linux --call-graph-section -o - < %s | FileCheck %s @@ -36,7 +36,7 @@ entry: !4 = !{!5} !5 = !{i64 0, !"_ZTSFPvS_E.generalized"} -; CHECK: .section .callgraph,"o",%progbits,.text +; CHECK: .section .llvm.callgraph,"o",%progbits,.text ;; Version ; CHECK-NEXT: .byte 0 ;; Flags diff --git a/llvm/test/CodeGen/ARM/call-graph-section-tailcall.ll b/llvm/test/CodeGen/ARM/call-graph-section-tailcall.ll index d577603..8036004 100644 --- a/llvm/test/CodeGen/ARM/call-graph-section-tailcall.ll +++ b/llvm/test/CodeGen/ARM/call-graph-section-tailcall.ll @@ -1,7 +1,7 @@ -;; Tests that we store the type identifiers in .callgraph section of the object file for tailcalls. +;; Tests that we store the type identifiers in .llvm.callgraph section of the object file for tailcalls. 
; RUN: llc -mtriple=arm-unknown-linux --call-graph-section -filetype=obj -o - < %s | \ -; RUN: llvm-readelf -x .callgraph - | FileCheck %s +; RUN: llvm-readelf -x .llvm.callgraph - | FileCheck %s define i32 @check_tailcall(ptr %func, i8 %x) !type !0 { entry: @@ -27,7 +27,7 @@ declare !type !2 i32 @bar(i8 signext) !2 = !{i64 0, !"_ZTSFicE.generalized"} !3 = !{i64 0, !"_ZTSFiiE.generalized"} -; CHECK: Hex dump of section '.callgraph': +; CHECK: Hex dump of section '.llvm.callgraph': ; CHECK-NEXT: 0x00000000 00050000 00008e19 0b7f3326 e3000154 ; CHECK-NEXT: 0x00000010 86bc5981 4b8e3000 05100000 00a150b8 ;; Verify that the type id 0x308e4b8159bc8654 is in section. diff --git a/llvm/test/CodeGen/ARM/call-graph-section.ll b/llvm/test/CodeGen/ARM/call-graph-section.ll index 928a1067..167cc6f 100644 --- a/llvm/test/CodeGen/ARM/call-graph-section.ll +++ b/llvm/test/CodeGen/ARM/call-graph-section.ll @@ -1,7 +1,7 @@ -;; Tests that we store the type identifiers in .callgraph section of the object file. +;; Tests that we store the type identifiers in .llvm.callgraph section of the object file. ; RUN: llc -mtriple=arm-unknown-linux --call-graph-section -filetype=obj -o - < %s | \ -; RUN: llvm-readelf -x .callgraph - | FileCheck %s +; RUN: llvm-readelf -x .llvm.callgraph - | FileCheck %s declare !type !0 void @foo() @@ -31,7 +31,7 @@ entry: ;; Make sure following type IDs are in call graph section ;; 0x5eecb3e2444f731f, 0x814b8e305486bc59, 0xf897fd777ade6814 -; CHECK: Hex dump of section '.callgraph': +; CHECK: Hex dump of section '.llvm.callgraph': ; CHECK-NEXT: 0x00000000 00050000 00000000 00000000 00000324 ; CHECK-NEXT: 0x00000010 44f731f5 eecb3e54 86bc5981 4b8e307a ; CHECK-NEXT: 0x00000020 de6814f8 97fd77 diff --git a/llvm/test/CodeGen/ARM/carry.ll b/llvm/test/CodeGen/ARM/carry.ll index 558e2b0..a652241 100644 --- a/llvm/test/CodeGen/ARM/carry.ll +++ b/llvm/test/CodeGen/ARM/carry.ll @@ -1,61 +1,84 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 ; RUN: llc -mtriple=armv6t2-eabi %s -o - | FileCheck %s define i64 @f1(i64 %a, i64 %b) { ; CHECK-LABEL: f1: -; CHECK: subs r -; CHECK: sbc r +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: subs r0, r0, r2 +; CHECK-NEXT: sbc r1, r1, r3 +; CHECK-NEXT: bx lr entry: - %tmp = sub i64 %a, %b - ret i64 %tmp + %tmp = sub i64 %a, %b + ret i64 %tmp } define i64 @f2(i64 %a, i64 %b) { ; CHECK-LABEL: f2: -; CHECK: lsl r -; CHECK: orr r -; CHECK: rsbs r -; CHECK: sbc r +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: lsl r1, r1, #1 +; CHECK-NEXT: orr r1, r1, r0, lsr #31 +; CHECK-NEXT: rsbs r0, r2, r0, lsl #1 +; CHECK-NEXT: sbc r1, r1, r3 +; CHECK-NEXT: bx lr entry: - %tmp1 = shl i64 %a, 1 - %tmp2 = sub i64 %tmp1, %b - ret i64 %tmp2 + %tmp1 = shl i64 %a, 1 + %tmp2 = sub i64 %tmp1, %b + ret i64 %tmp2 } ; add with live carry define i64 @f3(i32 %al, i32 %bl) { ; CHECK-LABEL: f3: -; CHECK: adds r -; CHECK: adc r +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: adds r0, r0, r1 +; CHECK-NEXT: mov r2, #0 +; CHECK-NEXT: adcs r0, r1, #0 +; CHECK-NEXT: adc r1, r2, #0 +; CHECK-NEXT: bx lr entry: - ; unsigned wide add - %aw = zext i32 %al to i64 - %bw = zext i32 %bl to i64 - %cw = add i64 %aw, %bw - ; ch == carry bit - %ch = lshr i64 %cw, 32 - %dw = add i64 %ch, %bw - ret i64 %dw + ; unsigned wide add + %aw = zext i32 %al to i64 + %bw = zext i32 %bl to i64 + %cw = add i64 %aw, %bw + ; ch == carry bit + %ch = lshr i64 %cw, 32 + %dw = add i64 %ch, %bw + ret i64 %dw } ; rdar://10073745 define i64 @f4(i64 %x) nounwind readnone { -entry: ; CHECK-LABEL: 
f4: -; CHECK: rsbs r -; CHECK: rsc r +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: rsc r1, r1, #0 +; CHECK-NEXT: bx lr +entry: %0 = sub nsw i64 0, %x ret i64 %0 } ; rdar://12559385 define i64 @f5(i32 %vi) { -entry: ; CHECK-LABEL: f5: -; CHECK: movw [[REG:r[0-9]+]], #36102 -; CHECK: sbc r{{[0-9]+}}, r{{[0-9]+}}, [[REG]] - %v0 = zext i32 %vi to i64 - %v1 = xor i64 %v0, -155057456198619 - %v4 = add i64 %v1, 155057456198619 - %v5 = add i64 %v4, %v1 - ret i64 %v5 +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: movw r1, #19493 +; CHECK-NEXT: movw r2, #29433 +; CHECK-NEXT: movt r1, #57191 +; CHECK-NEXT: eor r0, r0, r1 +; CHECK-NEXT: movw r3, #46043 +; CHECK-NEXT: movt r2, #65535 +; CHECK-NEXT: adds r0, r0, r0 +; CHECK-NEXT: movw r1, #36102 +; CHECK-NEXT: sbc r2, r2, r1 +; CHECK-NEXT: movt r3, #8344 +; CHECK-NEXT: adds r0, r0, r3 +; CHECK-NEXT: adc r1, r2, r1 +; CHECK-NEXT: bx lr +entry: + %v0 = zext i32 %vi to i64 + %v1 = xor i64 %v0, -155057456198619 + %v4 = add i64 %v1, 155057456198619 + %v5 = add i64 %v4, %v1 + ret i64 %v5 } diff --git a/llvm/test/CodeGen/ARM/nnan-fsub.ll b/llvm/test/CodeGen/ARM/nnan-fsub.ll index 0183908..78dd36f 100644 --- a/llvm/test/CodeGen/ARM/nnan-fsub.ll +++ b/llvm/test/CodeGen/ARM/nnan-fsub.ll @@ -1,18 +1,22 @@ -; RUN: llc -mcpu=cortex-a9 < %s | FileCheck -check-prefix=SAFE %s -; RUN: llc -mcpu=cortex-a9 --enable-no-nans-fp-math < %s | FileCheck -check-prefix=FAST %s +; RUN: llc -mcpu=cortex-a9 < %s | FileCheck %s target triple = "armv7-apple-ios" -; SAFE: test -; FAST: test +; CHECK-LABEL: test define float @test(float %x, float %y) { entry: -; SAFE: vmul.f32 -; SAFE: vsub.f32 -; FAST: mov r0, #0 +; CHECK: vmul.f32 +; CHECK-NEXT: vsub.f32 %0 = fmul float %x, %y %1 = fsub float %0, %0 ret float %1 } - +; CHECK-LABEL: test_nnan +define float @test_nnan(float %x, float %y) { +entry: +; CHECK: mov r0, #0 + %0 = fmul float %x, %y + %1 = fsub nnan float %0, %0 + ret float %1 +} diff --git a/llvm/test/CodeGen/DirectX/CBufferAccess/memcpy.ll b/llvm/test/CodeGen/DirectX/CBufferAccess/memcpy.ll index a78fdd5..f1486f97 100644 --- a/llvm/test/CodeGen/DirectX/CBufferAccess/memcpy.ll +++ b/llvm/test/CodeGen/DirectX/CBufferAccess/memcpy.ll @@ -74,7 +74,7 @@ entry: ; CHECK: [[UPTO1:%.*]] = insertelement <3 x double> [[UPTO0]], double [[Y]], i32 1 ; CHECK: [[UPTO2:%.*]] = insertelement <3 x double> [[UPTO1]], double [[Z]], i32 2 ; CHECK: [[DEST:%.*]] = getelementptr inbounds i8, ptr [[A2_COPY:%.*]], i32 0 -; CHECK: store <3 x double> [[UPTO2]], ptr [[DEST]], align 32 +; CHECK: store <3 x double> [[UPTO2]], ptr [[DEST]], align 8 ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", {{.*}})) [[CB]], i32 5) ; CHECK: [[X:%.*]] = extractvalue { double, double } [[LOAD]], 0 ; CHECK: [[Y:%.*]] = extractvalue { double, double } [[LOAD]], 1 @@ -83,9 +83,9 @@ entry: ; CHECK: [[UPTO0:%.*]] = insertelement <3 x double> poison, double [[X]], i32 0 ; CHECK: [[UPTO1:%.*]] = insertelement <3 x double> [[UPTO0]], double [[Y]], i32 1 ; CHECK: [[UPTO2:%.*]] = insertelement <3 x double> [[UPTO1]], double [[Z]], i32 2 -; CHECK: [[DEST:%.*]] = getelementptr inbounds i8, ptr [[A2_COPY]], i32 32 -; CHECK: store <3 x double> [[UPTO2]], ptr [[DEST]], align 32 - call void @llvm.memcpy.p0.p2.i32(ptr align 32 %a2.copy, ptr addrspace(2) align 32 @a2, i32 64, i1 false) +; CHECK: [[DEST:%.*]] = getelementptr inbounds i8, ptr [[A2_COPY]], i32 24 +; CHECK: store <3 x double> [[UPTO2]], ptr [[DEST]], align 8 + call void 
@llvm.memcpy.p0.p2.i32(ptr align 32 %a2.copy, ptr addrspace(2) align 32 @a2, i32 48, i1 false) ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", {{.*}})), ptr @CB.cb, align 4 ; CHECK: [[LOAD:%.*]] = call { half, half, half, half, half, half, half, half } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", {{.*}})) [[CB]], i32 7) diff --git a/llvm/test/CodeGen/DirectX/CBufferLoadLegacy-errors.ll b/llvm/test/CodeGen/DirectX/CBufferLoadLegacy-errors.ll index 71dcf11..196560f 100644 --- a/llvm/test/CodeGen/DirectX/CBufferLoadLegacy-errors.ll +++ b/llvm/test/CodeGen/DirectX/CBufferLoadLegacy-errors.ll @@ -11,11 +11,11 @@ declare void @f16_user(half) ; CHECK-SAME: in function four64 ; CHECK-SAME: Type mismatch between intrinsic and DXIL op define void @four64() "hlsl.export" { - %buffer = call target("dx.CBuffer", target("dx.Layout", {double}, 8, 0)) + %buffer = call target("dx.CBuffer", <{ double }>) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) %load = call {double, double, double, double} @llvm.dx.resource.load.cbufferrow.4( - target("dx.CBuffer", target("dx.Layout", {double}, 8, 0)) %buffer, + target("dx.CBuffer", <{ double }>) %buffer, i32 0) %data = extractvalue {double, double, double, double} %load, 0 @@ -28,11 +28,11 @@ define void @four64() "hlsl.export" { ; CHECK-SAME: in function two32 ; CHECK-SAME: Type mismatch between intrinsic and DXIL op define void @two32() "hlsl.export" { - %buffer = call target("dx.CBuffer", target("dx.Layout", {float}, 4, 0)) + %buffer = call target("dx.CBuffer", <{ float }>) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) %load = call {float, float} @llvm.dx.resource.load.cbufferrow.2( - target("dx.CBuffer", target("dx.Layout", {float}, 4, 0)) %buffer, + target("dx.CBuffer", <{ float }>) %buffer, i32 0) %data = extractvalue {float, float} %load, 0 @@ -41,5 +41,5 @@ define void @two32() "hlsl.export" { ret void } -declare { double, double, double, double } @llvm.dx.resource.load.cbufferrow.4.f64.f64.f64.f64.tdx.CBuffer_tdx.Layout_sl_f64s_8_0tt(target("dx.CBuffer", target("dx.Layout", { double }, 8, 0)), i32) -declare { float, float } @llvm.dx.resource.load.cbufferrow.2.f32.f32.tdx.CBuffer_tdx.Layout_sl_f32s_4_0tt(target("dx.CBuffer", target("dx.Layout", { float }, 4, 0)), i32) +declare { double, double, double, double } @llvm.dx.resource.load.cbufferrow.4.f64.f64.f64.f64.tdx.CBuffer_sl_f64st(target("dx.CBuffer", <{ double }>), i32) +declare { float, float } @llvm.dx.resource.load.cbufferrow.2.f32.f32.tdx.CBuffer_sl_f32st(target("dx.CBuffer", <{ float }>), i32) diff --git a/llvm/test/CodeGen/DirectX/CBufferLoadLegacy.ll b/llvm/test/CodeGen/DirectX/CBufferLoadLegacy.ll index d690651..dd40aa8 100644 --- a/llvm/test/CodeGen/DirectX/CBufferLoadLegacy.ll +++ b/llvm/test/CodeGen/DirectX/CBufferLoadLegacy.ll @@ -8,12 +8,12 @@ declare void @f16_user(half) ; CHECK-LABEL: define void @loadf32 define void @loadf32() { - %buffer = call target("dx.CBuffer", target("dx.Layout", {float}, 4, 0)) + %buffer = call target("dx.CBuffer", <{ float }>) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) ; CHECK: [[DATA:%.*]] = call %dx.types.CBufRet.f32 @dx.op.cbufferLoadLegacy.f32(i32 59, %dx.types.Handle %{{.*}}, i32 0) %load = call {float, float, float, float} @llvm.dx.resource.load.cbufferrow.4( - target("dx.CBuffer", target("dx.Layout", {float}, 4, 0)) %buffer, + target("dx.CBuffer", <{ float }>) %buffer, i32 0) %data = extractvalue {float, float, float, float} %load, 0 @@ -27,12 +27,12 @@ 
define void @loadf32() { ; CHECK-LABEL: define void @loadf64 define void @loadf64() { %buffer = call - target("dx.CBuffer", target("dx.Layout", {double, double, double, double}, 64, 0, 8, 16, 24)) + target("dx.CBuffer", <{ <4 x double> }>) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) ; CHECK: [[DATA:%.*]] = call %dx.types.CBufRet.f64 @dx.op.cbufferLoadLegacy.f64(i32 59, %dx.types.Handle %{{.*}}, i32 1) %load = call {double, double} @llvm.dx.resource.load.cbufferrow.2( - target("dx.CBuffer", target("dx.Layout", {double, double, double, double}, 64, 0, 8, 16, 24)) %buffer, + target("dx.CBuffer", <{ <4 x double> }>) %buffer, i32 1) %data = extractvalue {double, double} %load, 1 @@ -46,12 +46,12 @@ define void @loadf64() { ; CHECK-LABEL: define void @loadf16 define void @loadf16() { %buffer = call - target("dx.CBuffer", target("dx.Layout", {half}, 2, 0)) + target("dx.CBuffer", <{ half }>) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) ; CHECK: [[DATA:%.*]] = call %dx.types.CBufRet.f16.8 @dx.op.cbufferLoadLegacy.f16(i32 59, %dx.types.Handle %{{.*}}, i32 0) %load = call {half, half, half, half, half, half, half, half} @llvm.dx.resource.load.cbufferrow.8( - target("dx.CBuffer", target("dx.Layout", {half}, 2, 0)) %buffer, + target("dx.CBuffer", <{ half }>) %buffer, i32 0) %data = extractvalue {half, half, half, half, half, half, half, half} %load, 0 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/PSVResources-order.ll b/llvm/test/CodeGen/DirectX/ContainerData/PSVResources-order.ll index bcf82a6..5cd67be 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/PSVResources-order.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/PSVResources-order.ll @@ -18,7 +18,7 @@ define void @main() #0 { %srv0 = call target("dx.RawBuffer", i8, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0t( i32 1, i32 8, i32 1, i32 0, ptr null) - %cbuf = call target("dx.CBuffer", target("dx.Layout", {float}, 4, 0)) + %cbuf = call target("dx.CBuffer", <{ float }>) @llvm.dx.resource.handlefrombinding(i32 3, i32 2, i32 1, i32 0, ptr null) ret void } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll b/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll index bea0310..d792078 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/PSVResources.ll @@ -14,7 +14,7 @@ define void @main() #0 { ; CHECK: Kind: CBuffer ; CHECK: Flags: ; CHECK: UsedByAtomic64: false - %cbuf = call target("dx.CBuffer", target("dx.Layout", {float}, 4, 0)) + %cbuf = call target("dx.CBuffer", <{ float }>) @llvm.dx.resource.handlefrombinding(i32 3, i32 2, i32 1, i32 0, ptr null) ; ByteAddressBuffer Buf : register(t8, space1) @@ -94,6 +94,18 @@ define void @main() #0 { %uav2_2 = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0) @llvm.dx.resource.handlefrombinding.tdx.TypedBuffer_f32_1_0( i32 4, i32 0, i32 10, i32 5, ptr null) + + ; RWBuffer<float4> UnboundedArray[] : register(u10, space5) +; CHECK: - Type: UAVTyped +; CHECK: Space: 5 +; CHECK: LowerBound: 10 +; CHECK: UpperBound: 4294967295 +; CHECK: Kind: TypedBuffer +; CHECK: Flags: +; CHECK: UsedByAtomic64: false + ; RWBuffer<float4> Buf = BufferArray[100]; + %uav3 = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0) + @llvm.dx.resource.handlefrombinding(i32 5, i32 10, i32 -1, i32 100, ptr null) ret void } diff --git a/llvm/test/CodeGen/DirectX/CreateHandleFromBinding.ll b/llvm/test/CodeGen/DirectX/CreateHandleFromBinding.ll index 38f2de2..671fcef 100644 
--- a/llvm/test/CodeGen/DirectX/CreateHandleFromBinding.ll +++ b/llvm/test/CodeGen/DirectX/CreateHandleFromBinding.ll @@ -72,7 +72,7 @@ define void @test_bindings() { ; CHECK: call %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle [[BUF5]], %dx.types.ResourceProperties { i32 10, i32 1033 }) #[[#ATTR]] ; cbuffer cb0 : register(b0) { int4 i; float4 f; } - %cb0 = call target("dx.CBuffer", target("dx.Layout", {<4 x i32>, <4 x float>}, 32, 0, 16)) + %cb0 = call target("dx.CBuffer", <{ <4 x i32>, <4 x float> }>) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) ; CHECK: [[BUF6:%.*]] = call %dx.types.Handle @dx.op.createHandleFromBinding(i32 217, %dx.types.ResBind { i32 0, i32 0, i32 0, i8 2 }, i32 0, i1 false) #[[#ATTR]] ; CHECK: call %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle [[BUF6]], %dx.types.ResourceProperties { i32 13, i32 32 }) #[[#ATTR]] diff --git a/llvm/test/CodeGen/DirectX/ForwardHandleAccesses/cbuffer-access.ll b/llvm/test/CodeGen/DirectX/ForwardHandleAccesses/cbuffer-access.ll index 26b157f..d674863 100644 --- a/llvm/test/CodeGen/DirectX/ForwardHandleAccesses/cbuffer-access.ll +++ b/llvm/test/CodeGen/DirectX/ForwardHandleAccesses/cbuffer-access.ll @@ -4,27 +4,27 @@ %__cblayout_CB2 = type <{ float }> %struct.Scalars = type { float, i32, i32 } -@CB.cb = local_unnamed_addr global target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 12, 0, 4, 8)) poison -@CB2.cb = local_unnamed_addr global target("dx.CBuffer", target("dx.Layout", %__cblayout_CB2, 4, 0)) poison +@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison +@CB2.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB2) poison define void @main() local_unnamed_addr #1 { entry: ; CHECK: [[CB:%.*]] = tail call target({{.*}}) @llvm.dx.resource.handlefrombinding - %h = tail call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 12, 0, 4, 8)) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) - store target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 12, 0, 4, 8)) %h, ptr @CB.cb, align 4 + %h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) + store target("dx.CBuffer", %__cblayout_CB) %h, ptr @CB.cb, align 4 %_ZL3Out_h.i.i = tail call target("dx.RawBuffer", %struct.Scalars, 1, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) ; CHECK-NOT: load target({{.*}}), ptr @CB.cb - %cb = load target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 12, 0, 4, 8)), ptr @CB.cb, align 4 + %cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4 ; CHECK: call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target({{.*}}) [[CB]], i32 0) - %0 = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4(target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 12, 0, 4, 8)) %cb, i32 0) + %0 = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4(target("dx.CBuffer", %__cblayout_CB) %cb, i32 0) %1 = extractvalue { float, float, float, float } %0, 0 call void @llvm.dx.resource.store.rawbuffer(target("dx.RawBuffer", %struct.Scalars, 1, 0) %_ZL3Out_h.i.i, i32 0, i32 0, float %1) - + ; CHECK: [[CB2:%.*]] = tail call target({{.*}}) @llvm.dx.resource.handlefromimplicitbinding - %h2 = tail call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB2, 4, 0)) @llvm.dx.resource.handlefromimplicitbinding(i32 100, i32 0, i32 1, i32 0, ptr null) - store 
target("dx.CBuffer", target("dx.Layout", %__cblayout_CB2, 4, 0)) %h2, ptr @CB2.cb, align 4 + %h2 = tail call target("dx.CBuffer", %__cblayout_CB2) @llvm.dx.resource.handlefromimplicitbinding(i32 100, i32 0, i32 1, i32 0, ptr null) + store target("dx.CBuffer", %__cblayout_CB2) %h2, ptr @CB2.cb, align 4 ; CHECK-NOT: load target({{.*}}), ptr @CB2.cb - %cb2 = load target("dx.CBuffer", target("dx.Layout", %__cblayout_CB2, 4, 0)), ptr @CB2.cb, align 4 + %cb2 = load target("dx.CBuffer", %__cblayout_CB2), ptr @CB2.cb, align 4 ret void } diff --git a/llvm/test/CodeGen/DirectX/Metadata/cbuffer_metadata.ll b/llvm/test/CodeGen/DirectX/Metadata/cbuffer-layouttype.ll index 7ba2ed2..85952c9 100644 --- a/llvm/test/CodeGen/DirectX/Metadata/cbuffer_metadata.ll +++ b/llvm/test/CodeGen/DirectX/Metadata/cbuffer-layouttype.ll @@ -1,3 +1,6 @@ +; TODO: Remove this test once we've updated the frontend to use explicit +; padding. The cbuffer-metadata.ll test covers the newer logic. + ; RUN: opt -S -dxil-translate-metadata < %s | FileCheck %s ; RUN: opt -S --passes="dxil-pretty-printer" < %s 2>&1 | FileCheck %s --check-prefix=PRINT ; RUN: llc %s --filetype=asm -o - < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,PRINT @@ -19,11 +22,11 @@ target triple = "dxil-pc-shadermodel6.6-compute" ; PRINT:; Resource Bindings: ; PRINT-NEXT:; -; PRINT-NEXT:; Name Type Format Dim ID HLSL Bind Count -; PRINT-NEXT:; ------------------------------ ---------- ------- ----------- ------- -------------- ------ -; PRINT-NEXT:; CB1 cbuffer NA NA CB0 cb0 1 -; PRINT-NEXT:; CB2 cbuffer NA NA CB1 cb1 1 -; PRINT-NEXT:; MyConstants cbuffer NA NA CB2 cb5,space15 1 +; PRINT-NEXT:; Name Type Format Dim ID HLSL Bind Count +; PRINT-NEXT:; ---- +; PRINT-NEXT:; CB1 cbuffer NA NA CB0 cb0 1 +; PRINT-NEXT:; CB2 cbuffer NA NA CB1 cb1 1 +; PRINT-NEXT:; MyConstants cbuffer NA NA CB2 cb5,space15 1 define void @test() #0 { diff --git a/llvm/test/CodeGen/DirectX/Metadata/cbuffer-metadata.ll b/llvm/test/CodeGen/DirectX/Metadata/cbuffer-metadata.ll new file mode 100644 index 0000000..6b90e17 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/Metadata/cbuffer-metadata.ll @@ -0,0 +1,89 @@ +; RUN: opt -S -dxil-translate-metadata < %s | FileCheck %s +; RUN: opt -S --passes="dxil-pretty-printer" < %s 2>&1 | FileCheck %s --check-prefix=PRINT +; RUN: llc %s --filetype=asm -o - < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,PRINT + +target triple = "dxil-pc-shadermodel6.6-compute" + +%__cblayout_CB1 = type <{ float, i32, double, <2 x i32> }> +@CB1.cb = global target("dx.CBuffer", %__cblayout_CB1) poison +@CB1.str = private unnamed_addr constant [4 x i8] c"CB1\00", align 1 + +%__cblayout_CB2 = type <{ float, target("dx.Padding", 4), double, float, half, i16, i64, i32 }> +@CB2.cb = global target("dx.CBuffer", %__cblayout_CB2) poison +@CB2.str = private unnamed_addr constant [4 x i8] c"CB2\00", align 1 + +%__cblayout_MyConstants = type <{ + double, target("dx.Padding", 8), + <3 x float>, float, + <3 x double>, half, target("dx.Padding", 6), + <2 x double>, + float, <3 x half>, <3 x half> +}> +@MyConstants.cb = global target("dx.CBuffer", %__cblayout_MyConstants) poison +@MyConstants.str = private unnamed_addr constant [12 x i8] c"MyConstants\00", align 1 + +; PRINT:; Resource Bindings: +; PRINT-NEXT:; +; PRINT-NEXT:; Name Type Format Dim ID HLSL Bind Count +; PRINT-NEXT:; ---- +; PRINT-NEXT:; CB1 cbuffer NA NA CB0 cb0 1 +; PRINT-NEXT:; CB2 cbuffer NA NA CB1 cb1 1 +; PRINT-NEXT:; MyConstants cbuffer NA NA CB2 cb5,space15 1 + +define void @test() #0 { + + ; cbuffer CB1 : 
register(b0) { + ; float a; + ; int b; + ; double c; + ; int2 d; + ; } + %CB1.cb_h = call target("dx.CBuffer", %__cblayout_CB1) + @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr @CB1.str) + + ; cbuffer CB2 : register(b0) { + ; float a; + ; double b; + ; float c; + ; half d; + ; uint16_t e; + ; int64_t f; + ; int g; + ;} + %CB2.cb_h = call target("dx.CBuffer", %__cblayout_CB2) + @llvm.dx.resource.handlefrombinding(i32 0, i32 1, i32 1, i32 0, ptr @CB2.str) + + ; cbuffer CB3 : register(b5) { + ; double B0; + ; float3 B1; + ; float B2; + ; double3 B3; + ; half B4; + ; double2 B5; + ; float B6; + ; half3 B7; + ; half3 B8; + ; } + %CB3.cb_h = call target("dx.CBuffer", %__cblayout_MyConstants) + @llvm.dx.resource.handlefrombinding(i32 15, i32 5, i32 1, i32 0, ptr @MyConstants.str) + + ret void +} + +attributes #0 = { noinline nounwind "hlsl.shader"="compute" } + +; CHECK: %CBuffer.CB1 = type { { float, i32, double, <2 x i32> } } +; CHECK: %CBuffer.CB2 = type { { float, double, float, half, i16, i64, i32 } } +; CHECK: %CBuffer.MyConstants = type { { double, <3 x float>, float, <3 x double>, half, <2 x double>, float, <3 x half>, <3 x half> } } + +; CHECK: @CB1 = external constant %CBuffer.CB1 +; CHECK: @CB2 = external constant %CBuffer.CB2 +; CHECK: @MyConstants = external constant %CBuffer.MyConstants + +; CHECK: !dx.resources = !{[[ResList:[!][0-9]+]]} + +; CHECK: [[ResList]] = !{null, null, [[CBList:[!][0-9]+]], null} +; CHECK: [[CBList]] = !{![[CB1:[0-9]+]], ![[CB2:[0-9]+]], ![[MYCONSTANTS:[0-9]+]]} +; CHECK: ![[CB1]] = !{i32 0, ptr @CB1, !"CB1", i32 0, i32 0, i32 1, i32 24, null} +; CHECK: ![[CB2]] = !{i32 1, ptr @CB2, !"CB2", i32 0, i32 1, i32 1, i32 36, null} +; CHECK: ![[MYCONSTANTS]] = !{i32 2, ptr @MyConstants, !"MyConstants", i32 15, i32 5, i32 1, i32 96, null} diff --git a/llvm/test/CodeGen/DirectX/Metadata/cbuffer-only.ll b/llvm/test/CodeGen/DirectX/Metadata/cbuffer-only.ll index e2a1c09..0b454c1 100644 --- a/llvm/test/CodeGen/DirectX/Metadata/cbuffer-only.ll +++ b/llvm/test/CodeGen/DirectX/Metadata/cbuffer-only.ll @@ -7,7 +7,7 @@ target triple = "dxil-pc-shadermodel6.6-compute" define void @cbuffer_is_only_binding() { - %cbuf = call target("dx.CBuffer", target("dx.Layout", {float}, 4, 0)) + %cbuf = call target("dx.CBuffer", <{ float }>) @llvm.dx.resource.handlefrombinding(i32 1, i32 8, i32 1, i32 0, ptr null) ; CHECK: %CBuffer = type { float } diff --git a/llvm/test/CodeGen/DirectX/bufferGetDimensions.ll b/llvm/test/CodeGen/DirectX/bufferGetDimensions.ll new file mode 100644 index 0000000..ff03bf1 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/bufferGetDimensions.ll @@ -0,0 +1,16 @@ +; RUN: opt -S -dxil-op-lower %s | FileCheck %s + +target triple = "dxil-pc-shadermodel6.6-compute" + +define i32 @test_getdimensions_no_mips() { + ; CHECK: %[[HANDLE:.*]] = call %dx.types.Handle @dx.op.createHandleFromBinding(i32 217, + ; CHECK-NEXT: %[[ANNOT_HANDLE:.*]] = call %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle %[[HANDLE]] + %handle = call target("dx.TypedBuffer", <4 x float>, 0, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null) + + ; CHECK-NEXT: %[[RETVAL:.*]] = call %dx.types.Dimensions @dx.op.getDimensions(i32 72, %dx.types.Handle %[[ANNOT_HANDLE]], i32 undef) + ; CHECK-NEXT: %[[DIM:.*]] = extractvalue %dx.types.Dimensions %[[RETVAL]], 0 + %1 = call i32 @llvm.dx.resource.getdimensions.x(target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %handle) + + ; CHECK-NEXT: ret i32 %[[DIM]] + ret i32 %1 +} diff --git 
a/llvm/test/CodeGen/Hexagon/swp-many-stores.mir b/llvm/test/CodeGen/Hexagon/swp-many-stores.mir new file mode 100644 index 0000000..bf14dcf --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/swp-many-stores.mir @@ -0,0 +1,88 @@ +# RUN: llc -run-pass pipeliner -debug-only=pipeliner %s -o /dev/null -pipeliner-max-num-stores=5 2>&1 | FileCheck %s +# REQUIRES: asserts + +# This loop has six stores, which exceeds the limit set by +# `pipeliner-max-num-stores`. + +# CHECK: Too many stores + +--- | + target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" + target triple = "hexagon-unknown-linux-musl" + + define void @f(ptr %a, i32 %n) #0 { + entry: + %guard = icmp sgt i32 %n, 0 + %btc = sub nsw i32 %n, 1 + br i1 %guard, label %loop.preheader, label %exit + + loop.preheader: ; preds = %entry + %0 = add i32 %n, 1 + %cgep = getelementptr i8, ptr %a, i32 %0 + br label %loop + + loop: ; preds = %loop.preheader, %loop + %lsr.iv = phi ptr [ %cgep, %loop.preheader ], [ %cgep8, %loop ] + %i = phi i32 [ %i.dec, %loop ], [ %btc, %loop.preheader ] + %cgep7 = getelementptr i8, ptr %lsr.iv, i32 -2 + store i8 0, ptr %cgep7, align 1 + %cgep8 = getelementptr i8, ptr %lsr.iv, i32 -1 + store i8 1, ptr %cgep8, align 1 + store i8 2, ptr %lsr.iv, align 1 + %cgep9 = getelementptr i8, ptr %lsr.iv, i32 1 + store i8 3, ptr %cgep9, align 1 + %cgep10 = getelementptr i8, ptr %lsr.iv, i32 2 + store i8 4, ptr %cgep10, align 1 + %cgep11 = getelementptr i8, ptr %lsr.iv, i32 3 + store i8 5, ptr %cgep11, align 1 + %i.dec = sub i32 %i, 1 + %ec = icmp eq i32 %i.dec, 0 + br i1 %ec, label %exit, label %loop + + exit: ; preds = %loop, %entry + ret void + } + + attributes #0 = { "target-cpu"="hexagonv79" } +... +--- +name: f +tracksRegLiveness: true +body: | + bb.0.entry: + successors: %bb.1(0x50000000), %bb.3(0x30000000) + liveins: $r0, $r1 + + %7:intregs = COPY $r1 + %6:intregs = COPY $r0 + %8:predregs = C2_cmpgti %7, 0 + J2_jumpf %8, %bb.3, implicit-def dead $pc + J2_jump %bb.1, implicit-def dead $pc + + bb.1.loop.preheader: + successors: %bb.2(0x80000000) + + %0:intregs = A2_addi %7, -1 + %1:intregs = S4_addaddi %7, %6, 1 + %10:intregs = A2_tfrsi 0 + %11:intregs = A2_tfrsi 1 + %14:intregs = COPY %0 + J2_loop0r %bb.2, %14, implicit-def $lc0, implicit-def $sa0, implicit-def $usr + + bb.2.loop (machine-block-address-taken): + successors: %bb.3(0x04000000), %bb.2(0x7c000000) + + %2:intregs = PHI %1, %bb.1, %4, %bb.2 + S2_storerb_io %2, -2, %10 :: (store (s8) into %ir.cgep7) + %4:intregs = A2_addi %2, -1 + S2_storerb_io %2, -1, %11 :: (store (s8) into %ir.cgep8) + S4_storeirb_io %2, 0, 2 :: (store (s8) into %ir.lsr.iv) + S4_storeirb_io %2, 1, 3 :: (store (s8) into %ir.cgep9) + S4_storeirb_io %2, 2, 4 :: (store (s8) into %ir.cgep10) + S4_storeirb_io %2, 3, 5 :: (store (s8) into %ir.cgep11) + ENDLOOP0 %bb.2, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0 + J2_jump %bb.3, implicit-def dead $pc + + bb.3.exit: + PS_jmpret $r31, implicit-def dead $pc +... 
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-half.ll b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll index d111cf2..50f7d40 100644 --- a/llvm/test/CodeGen/LoongArch/calling-conv-half.ll +++ b/llvm/test/CodeGen/LoongArch/calling-conv-half.ll @@ -284,7 +284,6 @@ define i32 @caller_half_in_fregs() nounwind { ; LA64S-NEXT: addi.d $sp, $sp, -16 ; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill ; LA64S-NEXT: lu12i.w $a0, -12 -; LA64S-NEXT: lu32i.d $a0, 0 ; LA64S-NEXT: movgr2fr.w $fa0, $a0 ; LA64S-NEXT: ori $a0, $zero, 1 ; LA64S-NEXT: ori $a1, $zero, 2 @@ -326,7 +325,6 @@ define i32 @caller_half_in_fregs() nounwind { ; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16 ; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill ; LA64F-LP64D-NEXT: lu12i.w $a0, -12 -; LA64F-LP64D-NEXT: lu32i.d $a0, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a0 ; LA64F-LP64D-NEXT: ori $a0, $zero, 1 ; LA64F-LP64D-NEXT: ori $a1, $zero, 2 @@ -368,7 +366,6 @@ define i32 @caller_half_in_fregs() nounwind { ; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16 ; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill ; LA64D-LP64D-NEXT: lu12i.w $a0, -12 -; LA64D-LP64D-NEXT: lu32i.d $a0, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a0 ; LA64D-LP64D-NEXT: ori $a0, $zero, 1 ; LA64D-LP64D-NEXT: ori $a1, $zero, 2 @@ -688,32 +685,23 @@ define i32 @caller_half_in_gregs() nounwind { ; LA64S-NEXT: addi.d $sp, $sp, -16 ; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill ; LA64S-NEXT: lu12i.w $a1, -12 +; LA64S-NEXT: movgr2fr.w $fa1, $a1 ; LA64S-NEXT: ori $a0, $a1, 2176 +; LA64S-NEXT: lu12i.w $a2, -13 +; LA64S-NEXT: ori $a2, $a2, 3072 +; LA64S-NEXT: movgr2fr.w $fa0, $a2 ; LA64S-NEXT: ori $a2, $a1, 512 -; LA64S-NEXT: ori $a3, $a1, 1024 -; LA64S-NEXT: ori $a4, $a1, 1280 -; LA64S-NEXT: ori $a5, $a1, 1536 -; LA64S-NEXT: ori $a6, $a1, 1792 -; LA64S-NEXT: ori $a7, $a1, 2048 -; LA64S-NEXT: lu32i.d $a1, 0 -; LA64S-NEXT: movgr2fr.w $fa1, $a1 -; LA64S-NEXT: lu12i.w $a1, -13 -; LA64S-NEXT: ori $a1, $a1, 3072 -; LA64S-NEXT: lu32i.d $a1, 0 -; LA64S-NEXT: movgr2fr.w $fa0, $a1 -; LA64S-NEXT: lu32i.d $a2, 0 ; LA64S-NEXT: movgr2fr.w $fa2, $a2 -; LA64S-NEXT: lu32i.d $a3, 0 -; LA64S-NEXT: movgr2fr.w $fa3, $a3 -; LA64S-NEXT: lu32i.d $a4, 0 -; LA64S-NEXT: movgr2fr.w $fa4, $a4 -; LA64S-NEXT: lu32i.d $a5, 0 -; LA64S-NEXT: movgr2fr.w $fa5, $a5 -; LA64S-NEXT: lu32i.d $a0, 0 -; LA64S-NEXT: lu32i.d $a6, 0 -; LA64S-NEXT: movgr2fr.w $fa6, $a6 -; LA64S-NEXT: lu32i.d $a7, 0 -; LA64S-NEXT: movgr2fr.w $fa7, $a7 +; LA64S-NEXT: ori $a2, $a1, 1024 +; LA64S-NEXT: movgr2fr.w $fa3, $a2 +; LA64S-NEXT: ori $a2, $a1, 1280 +; LA64S-NEXT: movgr2fr.w $fa4, $a2 +; LA64S-NEXT: ori $a2, $a1, 1536 +; LA64S-NEXT: movgr2fr.w $fa5, $a2 +; LA64S-NEXT: ori $a2, $a1, 1792 +; LA64S-NEXT: movgr2fr.w $fa6, $a2 +; LA64S-NEXT: ori $a1, $a1, 2048 +; LA64S-NEXT: movgr2fr.w $fa7, $a1 ; LA64S-NEXT: ori $a1, $zero, 10 ; LA64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs) ; LA64S-NEXT: jirl $ra, $ra, 0 @@ -730,22 +718,14 @@ define i32 @caller_half_in_gregs() nounwind { ; LA64F-LP64S-NEXT: lu12i.w $a1, -12 ; LA64F-LP64S-NEXT: ori $t0, $a1, 2176 ; LA64F-LP64S-NEXT: lu32i.d $t0, 0 +; LA64F-LP64S-NEXT: lu12i.w $a0, -13 +; LA64F-LP64S-NEXT: ori $a0, $a0, 3072 ; LA64F-LP64S-NEXT: ori $a2, $a1, 512 ; LA64F-LP64S-NEXT: ori $a3, $a1, 1024 ; LA64F-LP64S-NEXT: ori $a4, $a1, 1280 ; LA64F-LP64S-NEXT: ori $a5, $a1, 1536 ; LA64F-LP64S-NEXT: ori $a6, $a1, 1792 ; LA64F-LP64S-NEXT: ori $a7, $a1, 2048 -; LA64F-LP64S-NEXT: lu32i.d $a1, 0 -; LA64F-LP64S-NEXT: lu12i.w $a0, -13 -; LA64F-LP64S-NEXT: ori $a0, $a0, 3072 -; 
LA64F-LP64S-NEXT: lu32i.d $a0, 0 -; LA64F-LP64S-NEXT: lu32i.d $a2, 0 -; LA64F-LP64S-NEXT: lu32i.d $a3, 0 -; LA64F-LP64S-NEXT: lu32i.d $a4, 0 -; LA64F-LP64S-NEXT: lu32i.d $a5, 0 -; LA64F-LP64S-NEXT: lu32i.d $a6, 0 -; LA64F-LP64S-NEXT: lu32i.d $a7, 0 ; LA64F-LP64S-NEXT: st.w $t0, $sp, 0 ; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs) ; LA64F-LP64S-NEXT: jirl $ra, $ra, 0 @@ -758,32 +738,23 @@ define i32 @caller_half_in_gregs() nounwind { ; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16 ; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill ; LA64F-LP64D-NEXT: lu12i.w $a1, -12 +; LA64F-LP64D-NEXT: movgr2fr.w $fa1, $a1 ; LA64F-LP64D-NEXT: ori $a0, $a1, 2176 +; LA64F-LP64D-NEXT: lu12i.w $a2, -13 +; LA64F-LP64D-NEXT: ori $a2, $a2, 3072 +; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a2 ; LA64F-LP64D-NEXT: ori $a2, $a1, 512 -; LA64F-LP64D-NEXT: ori $a3, $a1, 1024 -; LA64F-LP64D-NEXT: ori $a4, $a1, 1280 -; LA64F-LP64D-NEXT: ori $a5, $a1, 1536 -; LA64F-LP64D-NEXT: ori $a6, $a1, 1792 -; LA64F-LP64D-NEXT: ori $a7, $a1, 2048 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 -; LA64F-LP64D-NEXT: movgr2fr.w $fa1, $a1 -; LA64F-LP64D-NEXT: lu12i.w $a1, -13 -; LA64F-LP64D-NEXT: ori $a1, $a1, 3072 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 -; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a1 -; LA64F-LP64D-NEXT: lu32i.d $a2, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa2, $a2 -; LA64F-LP64D-NEXT: lu32i.d $a3, 0 -; LA64F-LP64D-NEXT: movgr2fr.w $fa3, $a3 -; LA64F-LP64D-NEXT: lu32i.d $a4, 0 -; LA64F-LP64D-NEXT: movgr2fr.w $fa4, $a4 -; LA64F-LP64D-NEXT: lu32i.d $a5, 0 -; LA64F-LP64D-NEXT: movgr2fr.w $fa5, $a5 -; LA64F-LP64D-NEXT: lu32i.d $a0, 0 -; LA64F-LP64D-NEXT: lu32i.d $a6, 0 -; LA64F-LP64D-NEXT: movgr2fr.w $fa6, $a6 -; LA64F-LP64D-NEXT: lu32i.d $a7, 0 -; LA64F-LP64D-NEXT: movgr2fr.w $fa7, $a7 +; LA64F-LP64D-NEXT: ori $a2, $a1, 1024 +; LA64F-LP64D-NEXT: movgr2fr.w $fa3, $a2 +; LA64F-LP64D-NEXT: ori $a2, $a1, 1280 +; LA64F-LP64D-NEXT: movgr2fr.w $fa4, $a2 +; LA64F-LP64D-NEXT: ori $a2, $a1, 1536 +; LA64F-LP64D-NEXT: movgr2fr.w $fa5, $a2 +; LA64F-LP64D-NEXT: ori $a2, $a1, 1792 +; LA64F-LP64D-NEXT: movgr2fr.w $fa6, $a2 +; LA64F-LP64D-NEXT: ori $a1, $a1, 2048 +; LA64F-LP64D-NEXT: movgr2fr.w $fa7, $a1 ; LA64F-LP64D-NEXT: ori $a1, $zero, 10 ; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs) ; LA64F-LP64D-NEXT: jirl $ra, $ra, 0 @@ -800,22 +771,14 @@ define i32 @caller_half_in_gregs() nounwind { ; LA64D-LP64S-NEXT: lu12i.w $a1, -12 ; LA64D-LP64S-NEXT: ori $t0, $a1, 2176 ; LA64D-LP64S-NEXT: lu32i.d $t0, 0 +; LA64D-LP64S-NEXT: lu12i.w $a0, -13 +; LA64D-LP64S-NEXT: ori $a0, $a0, 3072 ; LA64D-LP64S-NEXT: ori $a2, $a1, 512 ; LA64D-LP64S-NEXT: ori $a3, $a1, 1024 ; LA64D-LP64S-NEXT: ori $a4, $a1, 1280 ; LA64D-LP64S-NEXT: ori $a5, $a1, 1536 ; LA64D-LP64S-NEXT: ori $a6, $a1, 1792 ; LA64D-LP64S-NEXT: ori $a7, $a1, 2048 -; LA64D-LP64S-NEXT: lu32i.d $a1, 0 -; LA64D-LP64S-NEXT: lu12i.w $a0, -13 -; LA64D-LP64S-NEXT: ori $a0, $a0, 3072 -; LA64D-LP64S-NEXT: lu32i.d $a0, 0 -; LA64D-LP64S-NEXT: lu32i.d $a2, 0 -; LA64D-LP64S-NEXT: lu32i.d $a3, 0 -; LA64D-LP64S-NEXT: lu32i.d $a4, 0 -; LA64D-LP64S-NEXT: lu32i.d $a5, 0 -; LA64D-LP64S-NEXT: lu32i.d $a6, 0 -; LA64D-LP64S-NEXT: lu32i.d $a7, 0 ; LA64D-LP64S-NEXT: st.w $t0, $sp, 0 ; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs) ; LA64D-LP64S-NEXT: jirl $ra, $ra, 0 @@ -828,32 +791,23 @@ define i32 @caller_half_in_gregs() nounwind { ; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16 ; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill ; LA64D-LP64D-NEXT: lu12i.w $a1, -12 +; LA64D-LP64D-NEXT: movgr2fr.w 
$fa1, $a1 ; LA64D-LP64D-NEXT: ori $a0, $a1, 2176 +; LA64D-LP64D-NEXT: lu12i.w $a2, -13 +; LA64D-LP64D-NEXT: ori $a2, $a2, 3072 +; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a2 ; LA64D-LP64D-NEXT: ori $a2, $a1, 512 -; LA64D-LP64D-NEXT: ori $a3, $a1, 1024 -; LA64D-LP64D-NEXT: ori $a4, $a1, 1280 -; LA64D-LP64D-NEXT: ori $a5, $a1, 1536 -; LA64D-LP64D-NEXT: ori $a6, $a1, 1792 -; LA64D-LP64D-NEXT: ori $a7, $a1, 2048 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 -; LA64D-LP64D-NEXT: movgr2fr.w $fa1, $a1 -; LA64D-LP64D-NEXT: lu12i.w $a1, -13 -; LA64D-LP64D-NEXT: ori $a1, $a1, 3072 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 -; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a1 -; LA64D-LP64D-NEXT: lu32i.d $a2, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa2, $a2 -; LA64D-LP64D-NEXT: lu32i.d $a3, 0 -; LA64D-LP64D-NEXT: movgr2fr.w $fa3, $a3 -; LA64D-LP64D-NEXT: lu32i.d $a4, 0 -; LA64D-LP64D-NEXT: movgr2fr.w $fa4, $a4 -; LA64D-LP64D-NEXT: lu32i.d $a5, 0 -; LA64D-LP64D-NEXT: movgr2fr.w $fa5, $a5 -; LA64D-LP64D-NEXT: lu32i.d $a0, 0 -; LA64D-LP64D-NEXT: lu32i.d $a6, 0 -; LA64D-LP64D-NEXT: movgr2fr.w $fa6, $a6 -; LA64D-LP64D-NEXT: lu32i.d $a7, 0 -; LA64D-LP64D-NEXT: movgr2fr.w $fa7, $a7 +; LA64D-LP64D-NEXT: ori $a2, $a1, 1024 +; LA64D-LP64D-NEXT: movgr2fr.w $fa3, $a2 +; LA64D-LP64D-NEXT: ori $a2, $a1, 1280 +; LA64D-LP64D-NEXT: movgr2fr.w $fa4, $a2 +; LA64D-LP64D-NEXT: ori $a2, $a1, 1536 +; LA64D-LP64D-NEXT: movgr2fr.w $fa5, $a2 +; LA64D-LP64D-NEXT: ori $a2, $a1, 1792 +; LA64D-LP64D-NEXT: movgr2fr.w $fa6, $a2 +; LA64D-LP64D-NEXT: ori $a1, $a1, 2048 +; LA64D-LP64D-NEXT: movgr2fr.w $fa7, $a1 ; LA64D-LP64D-NEXT: ori $a1, $zero, 10 ; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs) ; LA64D-LP64D-NEXT: jirl $ra, $ra, 0 @@ -1231,28 +1185,20 @@ define i32 @caller_half_on_stack() nounwind { ; LA64S-NEXT: ori $t0, $a0, 3200 ; LA64S-NEXT: lu32i.d $t0, 0 ; LA64S-NEXT: ori $a1, $a0, 2304 -; LA64S-NEXT: lu32i.d $a1, 0 ; LA64S-NEXT: movgr2fr.w $fa0, $a1 ; LA64S-NEXT: ori $a1, $a0, 2432 -; LA64S-NEXT: lu32i.d $a1, 0 ; LA64S-NEXT: movgr2fr.w $fa1, $a1 ; LA64S-NEXT: ori $a1, $a0, 2560 -; LA64S-NEXT: lu32i.d $a1, 0 ; LA64S-NEXT: movgr2fr.w $fa2, $a1 ; LA64S-NEXT: ori $a1, $a0, 2688 -; LA64S-NEXT: lu32i.d $a1, 0 ; LA64S-NEXT: movgr2fr.w $fa3, $a1 ; LA64S-NEXT: ori $a1, $a0, 2816 -; LA64S-NEXT: lu32i.d $a1, 0 ; LA64S-NEXT: movgr2fr.w $fa4, $a1 ; LA64S-NEXT: ori $a1, $a0, 2944 -; LA64S-NEXT: lu32i.d $a1, 0 ; LA64S-NEXT: movgr2fr.w $fa5, $a1 ; LA64S-NEXT: ori $a1, $a0, 3072 -; LA64S-NEXT: lu32i.d $a1, 0 ; LA64S-NEXT: movgr2fr.w $fa6, $a1 ; LA64S-NEXT: ori $a0, $a0, 3136 -; LA64S-NEXT: lu32i.d $a0, 0 ; LA64S-NEXT: movgr2fr.w $fa7, $a0 ; LA64S-NEXT: ori $a0, $zero, 1 ; LA64S-NEXT: ori $a1, $zero, 2 @@ -1323,28 +1269,20 @@ define i32 @caller_half_on_stack() nounwind { ; LA64F-LP64D-NEXT: ori $t0, $a0, 3200 ; LA64F-LP64D-NEXT: lu32i.d $t0, 0 ; LA64F-LP64D-NEXT: ori $a1, $a0, 2304 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a1 ; LA64F-LP64D-NEXT: ori $a1, $a0, 2432 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa1, $a1 ; LA64F-LP64D-NEXT: ori $a1, $a0, 2560 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa2, $a1 ; LA64F-LP64D-NEXT: ori $a1, $a0, 2688 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa3, $a1 ; LA64F-LP64D-NEXT: ori $a1, $a0, 2816 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa4, $a1 ; LA64F-LP64D-NEXT: ori $a1, $a0, 2944 -; LA64F-LP64D-NEXT: lu32i.d $a1, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa5, $a1 ; LA64F-LP64D-NEXT: ori $a1, $a0, 3072 
-; LA64F-LP64D-NEXT: lu32i.d $a1, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa6, $a1 ; LA64F-LP64D-NEXT: ori $a0, $a0, 3136 -; LA64F-LP64D-NEXT: lu32i.d $a0, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa7, $a0 ; LA64F-LP64D-NEXT: ori $a0, $zero, 1 ; LA64F-LP64D-NEXT: ori $a1, $zero, 2 @@ -1415,28 +1353,20 @@ define i32 @caller_half_on_stack() nounwind { ; LA64D-LP64D-NEXT: ori $t0, $a0, 3200 ; LA64D-LP64D-NEXT: lu32i.d $t0, 0 ; LA64D-LP64D-NEXT: ori $a1, $a0, 2304 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a1 ; LA64D-LP64D-NEXT: ori $a1, $a0, 2432 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa1, $a1 ; LA64D-LP64D-NEXT: ori $a1, $a0, 2560 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa2, $a1 ; LA64D-LP64D-NEXT: ori $a1, $a0, 2688 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa3, $a1 ; LA64D-LP64D-NEXT: ori $a1, $a0, 2816 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa4, $a1 ; LA64D-LP64D-NEXT: ori $a1, $a0, 2944 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa5, $a1 ; LA64D-LP64D-NEXT: ori $a1, $a0, 3072 -; LA64D-LP64D-NEXT: lu32i.d $a1, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa6, $a1 ; LA64D-LP64D-NEXT: ori $a0, $a0, 3136 -; LA64D-LP64D-NEXT: lu32i.d $a0, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa7, $a0 ; LA64D-LP64D-NEXT: ori $a0, $zero, 1 ; LA64D-LP64D-NEXT: ori $a1, $zero, 2 @@ -1493,7 +1423,6 @@ define half @callee_half_ret() nounwind { ; LA64S: # %bb.0: ; LA64S-NEXT: lu12i.w $a0, -13 ; LA64S-NEXT: ori $a0, $a0, 3072 -; LA64S-NEXT: lu32i.d $a0, 0 ; LA64S-NEXT: movgr2fr.w $fa0, $a0 ; LA64S-NEXT: ret ; @@ -1501,14 +1430,12 @@ define half @callee_half_ret() nounwind { ; LA64F-LP64S: # %bb.0: ; LA64F-LP64S-NEXT: lu12i.w $a0, -13 ; LA64F-LP64S-NEXT: ori $a0, $a0, 3072 -; LA64F-LP64S-NEXT: lu32i.d $a0, 0 ; LA64F-LP64S-NEXT: ret ; ; LA64F-LP64D-LABEL: callee_half_ret: ; LA64F-LP64D: # %bb.0: ; LA64F-LP64D-NEXT: lu12i.w $a0, -13 ; LA64F-LP64D-NEXT: ori $a0, $a0, 3072 -; LA64F-LP64D-NEXT: lu32i.d $a0, 0 ; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a0 ; LA64F-LP64D-NEXT: ret ; @@ -1516,14 +1443,12 @@ define half @callee_half_ret() nounwind { ; LA64D-LP64S: # %bb.0: ; LA64D-LP64S-NEXT: lu12i.w $a0, -13 ; LA64D-LP64S-NEXT: ori $a0, $a0, 3072 -; LA64D-LP64S-NEXT: lu32i.d $a0, 0 ; LA64D-LP64S-NEXT: ret ; ; LA64D-LP64D-LABEL: callee_half_ret: ; LA64D-LP64D: # %bb.0: ; LA64D-LP64D-NEXT: lu12i.w $a0, -13 ; LA64D-LP64D-NEXT: ori $a0, $a0, 3072 -; LA64D-LP64D-NEXT: lu32i.d $a0, 0 ; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a0 ; LA64D-LP64D-NEXT: ret ret half 1.0 diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll index a6e3f79..0d0fb21 100644 --- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll +++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll @@ -76,7 +76,6 @@ define float @float_fsub_acquire(ptr %p) nounwind { ; LA64F: # %bb.0: ; LA64F-NEXT: fld.s $fa0, $a0, 0 ; LA64F-NEXT: lu12i.w $a1, -264192 -; LA64F-NEXT: lu32i.d $a1, 0 ; LA64F-NEXT: movgr2fr.w $fa1, $a1 ; LA64F-NEXT: .p2align 4, , 16 ; LA64F-NEXT: .LBB1_1: # %atomicrmw.start @@ -641,7 +640,6 @@ define float @float_fsub_release(ptr %p) nounwind { ; LA64F: # %bb.0: ; LA64F-NEXT: fld.s $fa0, $a0, 0 ; LA64F-NEXT: lu12i.w $a1, -264192 -; LA64F-NEXT: lu32i.d $a1, 0 ; LA64F-NEXT: movgr2fr.w $fa1, $a1 ; LA64F-NEXT: .p2align 4, , 16 ; LA64F-NEXT: .LBB9_1: # %atomicrmw.start @@ -1206,7 +1204,6 @@ define float @float_fsub_acq_rel(ptr %p) nounwind { 
; LA64F: # %bb.0: ; LA64F-NEXT: fld.s $fa0, $a0, 0 ; LA64F-NEXT: lu12i.w $a1, -264192 -; LA64F-NEXT: lu32i.d $a1, 0 ; LA64F-NEXT: movgr2fr.w $fa1, $a1 ; LA64F-NEXT: .p2align 4, , 16 ; LA64F-NEXT: .LBB17_1: # %atomicrmw.start @@ -1771,7 +1768,6 @@ define float @float_fsub_seq_cst(ptr %p) nounwind { ; LA64F: # %bb.0: ; LA64F-NEXT: fld.s $fa0, $a0, 0 ; LA64F-NEXT: lu12i.w $a1, -264192 -; LA64F-NEXT: lu32i.d $a1, 0 ; LA64F-NEXT: movgr2fr.w $fa1, $a1 ; LA64F-NEXT: .p2align 4, , 16 ; LA64F-NEXT: .LBB25_1: # %atomicrmw.start @@ -2336,7 +2332,6 @@ define float @float_fsub_monotonic(ptr %p) nounwind { ; LA64F: # %bb.0: ; LA64F-NEXT: fld.s $fa0, $a0, 0 ; LA64F-NEXT: lu12i.w $a1, -264192 -; LA64F-NEXT: lu32i.d $a1, 0 ; LA64F-NEXT: movgr2fr.w $fa1, $a1 ; LA64F-NEXT: .p2align 4, , 16 ; LA64F-NEXT: .LBB33_1: # %atomicrmw.start diff --git a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll index bf31ccb..559cc53 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll @@ -32,6 +32,40 @@ define void @select_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind { ret void } +define void @select_v32i8_1(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v32i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI2_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI2_0) +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <32 x i8>, ptr %a0 + %v1 = load <32 x i8>, ptr %a1 + %sel = select <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <32 x i8> %v0, <32 x i8> %v1 + store <32 x i8> %sel, ptr %res + ret void +} + +define void @select_v32i8_2(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v32i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI3_0) +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <32 x i8>, ptr %a0 + %v1 = load <32 x i8>, ptr %a1 + %sel = select <32 x i1> <i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <32 x i8> %v0, <32 x i8> %v1 + store <32 x i8> %sel, ptr %res + ret void +} + define void @select_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind { ; CHECK-LABEL: select_v16i16: ; CHECK: # %bb.0: @@ -49,6 +83,40 @@ define void @select_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind { ret void } +define void @select_v16i16_1(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v16i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI5_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI5_0) +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i16>, ptr %a0 + %v1 = load <16 x i16>, ptr %a1 + %sel = 
select <16 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i16> %v0, <16 x i16> %v1 + store <16 x i16> %sel, ptr %res + ret void +} + +define void @select_v16i16_2(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v16i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI6_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI6_0) +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i16>, ptr %a0 + %v1 = load <16 x i16>, ptr %a1 + %sel = select <16 x i1> <i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x i16> %v0, <16 x i16> %v1 + store <16 x i16> %sel, ptr %res + ret void +} + define void @select_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind { ; CHECK-LABEL: select_v8i32: ; CHECK: # %bb.0: @@ -65,19 +133,70 @@ define void @select_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind { ret void } +define void @select_v8i32_1(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI8_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI8_0) +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i32>, ptr %a0 + %v1 = load <8 x i32>, ptr %a1 + %sel = select <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i32> %v0, <8 x i32> %v1 + store <8 x i32> %sel, ptr %res + ret void +} + +define void @select_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI9_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI9_0) +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x float>, ptr %a0 + %v1 = load <8 x float>, ptr %a1 + %sel = select <8 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false>, <8 x float> %v0, <8 x float> %v1 + store <8 x float> %sel, ptr %res + ret void +} + define void @select_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind { ; CHECK-LABEL: select_v4i64: ; CHECK: # %bb.0: ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI4_0) -; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI4_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI10_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI10_0) ; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret %v0 = load <4 x i64>, ptr %a0 %v1 = load <4 x i64>, ptr %a1 - %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i64> %v0, <4 x i64> %v1 + %sel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %v0, <4 x i64> %v1 store <4 x i64> %sel, ptr %res ret void } + +define void @select_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI11_0) +; CHECK-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI11_0) +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst 
$xr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <4 x double>, ptr %a0 + %v1 = load <4 x double>, ptr %a1 + %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %v0, <4 x double> %v1 + store <4 x double> %sel, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll index 8f25a6b..25c4f09 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll @@ -16,6 +16,20 @@ define void @select_v16i8_imm(ptr %res, ptr %a0) nounwind { ret void } +define void @select_v16i8_imm_1(ptr %res, ptr %a0) nounwind { +; CHECK-LABEL: select_v16i8_imm_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vrepli.h $vr1, -256 +; CHECK-NEXT: vbitseli.b $vr1, $vr0, 1 +; CHECK-NEXT: vst $vr1, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i8>, ptr %a0 + %sel = select <16 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %v0 + store <16 x i8> %sel, ptr %res + ret void +} + define void @select_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind { ; CHECK-LABEL: select_v16i8: ; CHECK: # %bb.0: @@ -32,6 +46,40 @@ define void @select_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind { ret void } +define void @select_v16i8_1(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v16i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI3_0) +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i8>, ptr %a0 + %v1 = load <16 x i8>, ptr %a1 + %sel = select <16 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> %v0, <16 x i8> %v1 + store <16 x i8> %sel, ptr %res + ret void +} + +define void @select_v16i8_2(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v16i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI4_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI4_0) +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <16 x i8>, ptr %a0 + %v1 = load <16 x i8>, ptr %a1 + %sel = select <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>, <16 x i8> %v0, <16 x i8> %v1 + store <16 x i8> %sel, ptr %res + ret void +} + define void @select_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind { ; CHECK-LABEL: select_v8i16: ; CHECK: # %bb.0: @@ -49,6 +97,40 @@ define void @select_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind { ret void } +define void @select_v8i16_1(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI6_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI6_0) +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i16>, ptr %a0 + %v1 = load <8 x i16>, ptr %a1 + %sel = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, 
i1 false, i1 false, i1 false, i1 false>, <8 x i16> %v0, <8 x i16> %v1 + store <8 x i16> %sel, ptr %res + ret void +} + +define void @select_v8i16_2(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI7_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI7_0) +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <8 x i16>, ptr %a0 + %v1 = load <8 x i16>, ptr %a1 + %sel = select <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %v0, <8 x i16> %v1 + store <8 x i16> %sel, ptr %res + ret void +} + define void @select_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind { ; CHECK-LABEL: select_v4i32: ; CHECK: # %bb.0: @@ -65,13 +147,47 @@ define void @select_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind { ret void } +define void @select_v4i32_1(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v4i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI9_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI9_0) +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <4 x i32>, ptr %a0 + %v1 = load <4 x i32>, ptr %a1 + %sel = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i32> %v0, <4 x i32> %v1 + store <4 x i32> %sel, ptr %res + ret void +} + +define void @select_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI10_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI10_0) +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <4 x float>, ptr %a0 + %v1 = load <4 x float>, ptr %a1 + %sel = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %v0, <4 x float> %v1 + store <4 x float> %sel, ptr %res + ret void +} + define void @select_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind { ; CHECK-LABEL: select_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI4_0) -; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI4_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI11_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI11_0) ; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret @@ -81,3 +197,20 @@ define void @select_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind { store <2 x i64> %sel, ptr %res ret void } + +define void @select_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: select_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI12_0) +; CHECK-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI12_0) +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret + %v0 = load <2 x double>, ptr %a0 + %v1 = load <2 x double>, ptr %a1 + %sel = select <2 x i1> <i1 false, i1 true>, <2 x double> %v0, <2 x double> %v1 + store <2 x double> %sel, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/MIR/AArch64/return-address-signing.mir b/llvm/test/CodeGen/MIR/AArch64/return-address-signing.mir index 1030917..302f70f 100644 --- a/llvm/test/CodeGen/MIR/AArch64/return-address-signing.mir +++ 
b/llvm/test/CodeGen/MIR/AArch64/return-address-signing.mir @@ -1,4 +1,4 @@ -# RUN: llc -mtriple=aarch64 -run-pass=prologepilog -run-pass=aarch64-ptrauth -o - %s 2>&1 | FileCheck %s +# RUN: llc -mtriple=aarch64 -run-pass=prologepilog -run-pass=aarch64-ptrauth -o - %s 2>&1 | FileCheck --strict-whitespace %s --- | target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64" diff --git a/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll index 153ca10..72f10ae 100644 --- a/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll @@ -1141,29 +1141,88 @@ define <2 x i32> @test_select_cc(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x ret <2 x i32> %r } -define <2 x i16> @test_trunc_2xi32(<2 x i32> %a) #0 { -; CHECK-NOI32X2-LABEL: test_trunc_2xi32( +define <2 x i16> @test_trunc_2xi32_to_2xi16(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_trunc_2xi32_to_2xi16( ; CHECK-NOI32X2: { ; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>; ; CHECK-NOI32X2-EMPTY: ; CHECK-NOI32X2-NEXT: // %bb.0: -; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_trunc_2xi32_param_0]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_trunc_2xi32_to_2xi16_param_0]; ; CHECK-NOI32X2-NEXT: prmt.b32 %r3, %r1, %r2, 0x5410U; ; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NOI32X2-NEXT: ret; ; -; CHECK-I32X2-LABEL: test_trunc_2xi32( +; CHECK-I32X2-LABEL: test_trunc_2xi32_to_2xi16( ; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<4>; ; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; ; CHECK-I32X2-EMPTY: ; CHECK-I32X2-NEXT: // %bb.0: -; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_trunc_2xi32_param_0]; -; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %rd1; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_trunc_2xi32_to_2xi16_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: prmt.b32 %r3, %r1, %r2, 0x5410U; +; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-I32X2-NEXT: ret; %r = trunc <2 x i32> %a to <2 x i16> ret <2 x i16> %r } +define <2 x i8> @test_trunc_2xi32_to_2xi8(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_trunc_2xi32_to_2xi8( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b16 %rs<3>; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_trunc_2xi32_to_2xi8_param_0]; +; CHECK-NOI32X2-NEXT: cvt.u16.u32 %rs1, %r2; +; CHECK-NOI32X2-NEXT: cvt.u16.u32 %rs2, %r1; +; CHECK-NOI32X2-NEXT: st.param.v2.b8 [func_retval0], {%rs2, %rs1}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_trunc_2xi32_to_2xi8( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b16 %rs<3>; +; CHECK-I32X2-NEXT: .reg .b32 %r<3>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_trunc_2xi32_to_2xi8_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: cvt.u16.u32 %rs1, %r2; +; CHECK-I32X2-NEXT: cvt.u16.u32 %rs2, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b8 [func_retval0], {%rs2, %rs1}; +; CHECK-I32X2-NEXT: ret; + %r = trunc <2 x i32> %a to <2 x i8> + ret <2 x i8> %r +} + +define <2 x i1> @test_trunc_2xi32_to_2xi1(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_trunc_2xi32_to_2xi1( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_trunc_2xi32_to_2xi1_param_0]; +; 
CHECK-NOI32X2-NEXT: st.param.b8 [func_retval0], %r1; +; CHECK-NOI32X2-NEXT: st.param.b8 [func_retval0+1], %r2; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_trunc_2xi32_to_2xi1( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<3>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_trunc_2xi32_to_2xi1_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: st.param.b8 [func_retval0], %r1; +; CHECK-I32X2-NEXT: st.param.b8 [func_retval0+1], %r2; +; CHECK-I32X2-NEXT: ret; + %r = trunc <2 x i32> %a to <2 x i1> + ret <2 x i1> %r +} + define <2 x i32> @test_trunc_2xi64(<2 x i64> %a) #0 { ; CHECK-LABEL: test_trunc_2xi64( ; CHECK: { @@ -1180,14 +1239,14 @@ define <2 x i32> @test_trunc_2xi64(<2 x i64> %a) #0 { ret <2 x i32> %r } -define <2 x i32> @test_zext_2xi32(<2 x i16> %a) #0 { -; CHECK-LABEL: test_zext_2xi32( +define <2 x i32> @test_zext_2xi16_to_2xi32(<2 x i16> %a) #0 { +; CHECK-LABEL: test_zext_2xi16_to_2xi32( ; CHECK: { ; CHECK-NEXT: .reg .b16 %rs<3>; ; CHECK-NEXT: .reg .b32 %r<4>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r1, [test_zext_2xi32_param_0]; +; CHECK-NEXT: ld.param.b32 %r1, [test_zext_2xi16_to_2xi32_param_0]; ; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; ; CHECK-NEXT: cvt.u32.u16 %r2, %rs2; ; CHECK-NEXT: cvt.u32.u16 %r3, %rs1; @@ -1197,6 +1256,47 @@ define <2 x i32> @test_zext_2xi32(<2 x i16> %a) #0 { ret <2 x i32> %r } +define <2 x i32> @test_zext_2xi8_to_2xi32(<2 x i8> %a) #0 { +; CHECK-LABEL: test_zext_2xi8_to_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [test_zext_2xi8_to_2xi32_param_0]; +; CHECK-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r3, %rs1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2}; +; CHECK-NEXT: ret; + %r = zext <2 x i8> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i32> @test_zext_2xi1_to_2xi32(<2 x i1> %a) #0 { +; CHECK-LABEL: test_zext_2xi1_to_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .b16 %rs<5>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [test_zext_2xi1_to_2xi32_param_0+1]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p2, %rs2, 0; +; CHECK-NEXT: ld.param.b8 %rs3, [test_zext_2xi1_to_2xi32_param_0]; +; CHECK-NEXT: and.b16 %rs4, %rs3, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs4, 0; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs1; +; CHECK-NEXT: and.b32 %r2, %r1, 1; +; CHECK-NEXT: cvt.u32.u16 %r3, %rs3; +; CHECK-NEXT: and.b32 %r4, %r3, 1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r2}; +; CHECK-NEXT: ret; + %r = zext <2 x i1> %a to <2 x i32> + ret <2 x i32> %r +} + define <2 x i64> @test_zext_2xi64(<2 x i32> %a) #0 { ; CHECK-NOI32X2-LABEL: test_zext_2xi64( ; CHECK-NOI32X2: { @@ -1566,6 +1666,55 @@ entry: ret void } +define <2 x i32> @test_sext_v2i8_to_v2i32 (<2 x i8> %a) { +; CHECK-LABEL: test_sext_v2i8_to_v2i32( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [test_sext_v2i8_to_v2i32_param_0]; +; CHECK-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-NEXT: cvt.s32.s8 %r3, %r2; +; CHECK-NEXT: cvt.u32.u16 %r4, %rs1; +; CHECK-NEXT: cvt.s32.s8 %r5, %r4; +; CHECK-NEXT: 
st.param.v2.b32 [func_retval0], {%r5, %r3}; +; CHECK-NEXT: ret; + %r = sext <2 x i8> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i32> @test_sext_v2i16_to_v2i32 (<2 x i16> %a) { +; CHECK-NOI32X2-LABEL: test_sext_v2i16_to_v2i32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b16 %rs<2>; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.b32 %r1, [test_sext_v2i16_to_v2i32_param_0]; +; CHECK-NOI32X2-NEXT: cvt.s32.s16 %r2, %r1; +; CHECK-NOI32X2-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r1; } +; CHECK-NOI32X2-NEXT: cvt.s32.s16 %r3, %rs1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_sext_v2i16_to_v2i32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b16 %rs<2>; +; CHECK-I32X2-NEXT: .reg .b32 %r<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b32 %r1, [test_sext_v2i16_to_v2i32_param_0]; +; CHECK-I32X2-NEXT: cvt.s32.s16 %r2, %r1; +; CHECK-I32X2-NEXT: mov.b32 {_, %rs1}, %r1; +; CHECK-I32X2-NEXT: cvt.s32.s16 %r3, %rs1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = sext <2 x i16> %a to <2 x i32> + ret <2 x i32> %r +} + define <2 x float> @test_uitofp_v2i32(<2 x i32> %a) { ; CHECK-NOI32X2-LABEL: test_uitofp_v2i32( ; CHECK-NOI32X2: { diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll b/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll index 1edb387..f345e08 100644 --- a/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll +++ b/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll @@ -2,9 +2,13 @@ ; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK_PTX64 %s ; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | FileCheck --check-prefixes=CHECK_PTX64_SHARED32 %s ; RUN: llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | FileCheck --check-prefixes=CHECK_PTX64 %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | FileCheck --check-prefixes=CHECK_PTX64 %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | FileCheck --check-prefixes=CHECK_PTX64 %s ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | %ptxas-verify -arch=sm_103a %} +; RUN: %if ptxas-sm_100f && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | %ptxas-verify -arch=sm_100f %} +; RUN: %if ptxas-sm_110f && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | %ptxas-verify -arch=sm_110f %} declare void @llvm.nvvm.tcgen05.alloc.cg1(ptr %addr, i32 %ncols) diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll index 2e80c4c..29b130f 100644 --- a/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll +++ b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll @@ -2,9 +2,13 @@ ; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK_PTX64 %s ; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | FileCheck --check-prefixes=CHECK_PTX64_SHARED32 %s ; RUN: llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | FileCheck --check-prefixes=CHECK_PTX64 %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 
| FileCheck --check-prefixes=CHECK_PTX64 %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | FileCheck --check-prefixes=CHECK_PTX64 %s ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 --nvptx-short-ptr | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | %ptxas-verify -arch=sm_103a %} +; RUN: %if ptxas-sm_100f && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | %ptxas-verify -arch=sm_100f %} +; RUN: %if ptxas-sm_110f && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | %ptxas-verify -arch=sm_110f %} declare void @llvm.nvvm.tcgen05.commit.cg1(ptr %bar_addr) declare void @llvm.nvvm.tcgen05.commit.cg2(ptr %bar_addr) diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll b/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll index 817b1d5..4e463a14 100644 --- a/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll +++ b/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll @@ -1,8 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK %s ; RUN: llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | FileCheck --check-prefixes=CHECK %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | FileCheck --check-prefixes=CHECK %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | FileCheck --check-prefixes=CHECK %s ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | %ptxas-verify -arch=sm_103a %} +; RUN: %if ptxas-sm_100f && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | %ptxas-verify -arch=sm_100f %} +; RUN: %if ptxas-sm_110f && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | %ptxas-verify -arch=sm_110f %} define void @test_tcgen05_cp_64x128_v1_cg1(ptr addrspace(6) %addr, i64 %sdesc) { ; CHECK-LABEL: test_tcgen05_cp_64x128_v1_cg1( diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll b/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll index cbf647f..fc8cce4 100644 --- a/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll +++ b/llvm/test/CodeGen/NVPTX/tcgen05-fence.ll @@ -1,8 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK %s ; RUN: llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | FileCheck --check-prefixes=CHECK %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | FileCheck --check-prefixes=CHECK %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | FileCheck --check-prefixes=CHECK %s ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | %ptxas-verify -arch=sm_103a %} +; RUN: %if ptxas-sm_100f && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | %ptxas-verify -arch=sm_100f %} +; RUN: %if ptxas-sm_110f && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | %ptxas-verify -arch=sm_110f %} 
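; Editor's note (not part of the upstream diff, added for clarity): the added RUN lines use lit's conditional substitution, `%if <features> %{ <command> %}`, which expands to <command> only when every listed feature is available in the test configuration and to nothing otherwise. Here the FileCheck codegen runs stay unconditional, while the `%ptxas-verify` runs are skipped on hosts whose ptxas lacks the requested sm_100f/sm_110f target or ISA version. `<features>` and `<command>` are placeholders for illustration, not literal lit syntax.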
declare void @llvm.nvvm.tcgen05.fence.before.thread.sync() declare void @llvm.nvvm.tcgen05.fence.after.thread.sync() diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-ld.ll b/llvm/test/CodeGen/NVPTX/tcgen05-ld.ll index a37b1a9..22eb729 100644 --- a/llvm/test/CodeGen/NVPTX/tcgen05-ld.ll +++ b/llvm/test/CodeGen/NVPTX/tcgen05-ld.ll @@ -2,9 +2,13 @@ ; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s ; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s ; RUN: llc < %s -o - -mcpu=sm_103a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_100f -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_110f -march=nvptx64 -mattr=+ptx90 | FileCheck %s ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} ; RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_103a | %ptxas-verify -arch=sm_103a %} +; RUN: %if ptxas-sm_100f && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_100f | %ptxas-verify -arch=sm_100f %} +; RUN: %if ptxas-sm_110f && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110f | %ptxas-verify -arch=sm_110f %} ; CHECK-LABEL: nvvm_tcgen05_ld_16x64b define void @nvvm_tcgen05_ld_16x64b(ptr addrspace(6) %taddr) { diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll b/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll index bf2adac..33483b5 100644 --- a/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll +++ b/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll @@ -1,8 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | FileCheck --check-prefixes=CHECK %s ; RUN: llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | FileCheck --check-prefixes=CHECK %s +; RUN: llc < %s -march=nvptx64 -mcpu=sm_110a -mattr=+ptx90 | FileCheck --check-prefixes=CHECK %s ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | %ptxas-verify -arch=sm_103a %} +; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mcpu=sm_110a -mattr=+ptx90 | %ptxas-verify -arch=sm_110a %} declare void @llvm.nvvm.tcgen05.shift.down.cg1(ptr addrspace(6) %tmem_addr) declare void @llvm.nvvm.tcgen05.shift.down.cg2(ptr addrspace(6) %tmem_addr) diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-st.ll b/llvm/test/CodeGen/NVPTX/tcgen05-st.ll index 0636a06..ccf6541 100644 --- a/llvm/test/CodeGen/NVPTX/tcgen05-st.ll +++ b/llvm/test/CodeGen/NVPTX/tcgen05-st.ll @@ -2,9 +2,13 @@ ; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s ; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s ; RUN: llc < %s -o - -mcpu=sm_103a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_100f -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_110f -march=nvptx64 -mattr=+ptx90 | FileCheck %s ; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %} ; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_101a -mattr=+ptx86 | %ptxas-verify -arch=sm_101a %} ; 
RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | %ptxas-verify -arch=sm_103a %} +; RUN: %if ptxas-sm_100f && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_100f -mattr=+ptx88 | %ptxas-verify -arch=sm_100f %} +; RUN: %if ptxas-sm_110f && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mcpu=sm_110f -mattr=+ptx90 | %ptxas-verify -arch=sm_110f %} ; CHECK-LABEL: nvvm_tcgen05_st_16x64b define void @nvvm_tcgen05_st_16x64b(ptr addrspace(6) %taddr, i32 %stv1, <2 x i32> %stv2, <4 x i32> %stv4, <8 x i32> %stv8, <16 x i32> %stv16, <32 x i32> %stv32, <64 x i32> %stv64, <128 x i32> %stv128) { diff --git a/llvm/test/CodeGen/PowerPC/addition-vector-all-ones.ll b/llvm/test/CodeGen/PowerPC/addition-vector-all-ones.ll new file mode 100644 index 0000000..e67d031 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/addition-vector-all-ones.ll @@ -0,0 +1,60 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc-ibm-aix \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; Adding a vector of 1s to vector `A` currently uses `vspltisw` to materialize the vector of 1s, followed by an add operation. + +; Function for the vector type v2i64 `a + {1, 1}` +define <2 x i64> @test_v2i64(<2 x i64> %a) { +; CHECK-LABEL: test_v2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vspltisw v3, 1 +; CHECK-NEXT: vupklsw v3, v3 +; CHECK-NEXT: vaddudm v2, v2, v3 +; CHECK-NEXT: blr +entry: + %add = add <2 x i64> %a, splat (i64 1) + ret <2 x i64> %add +} + +; Function for the vector type v4i32 `a + {1, 1, 1, 1}` +define <4 x i32> @test_v4i32(<4 x i32> %a) { +; CHECK-LABEL: test_v4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vspltisw v3, 1 +; CHECK-NEXT: vadduwm v2, v2, v3 +; CHECK-NEXT: blr +entry: + %add = add <4 x i32> %a, splat (i32 1) + ret <4 x i32> %add +} + +; Function for the vector type v8i16 `a + {1, 1, 1, 1, 1, 1, 1, 1}` +define <8 x i16> @test_v8i16(<8 x i16> %a) { +; CHECK-LABEL: test_v8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vspltish v3, 1 +; CHECK-NEXT: vadduhm v2, v2, v3 +; CHECK-NEXT: blr +entry: + %add = add <8 x i16> %a, splat (i16 1) + ret <8 x i16> %add +} + +; Function for the vector type v16i8 `a + {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}` +define <16 x i8> @test_16i8(<16 x i8> %a) { +; CHECK-LABEL: test_16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xxspltib v3, 1 +; CHECK-NEXT: vaddubm v2, v2, v3 +; CHECK-NEXT: blr +entry: + %add = add <16 x i8> %a, splat (i8 1) + ret <16 x i8> %add +} diff --git a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll b/llvm/test/CodeGen/PowerPC/compare-vector-with-zero.ll index 0f7e0c7..1325abf 100644 --- a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll +++ b/llvm/test/CodeGen/PowerPC/compare-vector-with-zero.ll @@ -95,3 +95,80 @@ declare i4 @llvm.ctpop.i4(i4) #1 !6 = !{!"short", !7, i64 0} !7 = !{!"omnipotent char", !8, i64 0} !8 = !{!"Simple C/C++ TBAA"} + +; Function to lock down codegen changes for floating-point vector comparisons +define range(i32 0, 5) i32 @cols_needed(ptr %colauths){ +; POWERPC_64LE-LABEL: cols_needed: +; POWERPC_64LE: # %bb.0: # %entry +; POWERPC_64LE-NEXT: lxv 
vs0, 0(r3) +; POWERPC_64LE-NEXT: xxlxor vs1, vs1, vs1 +; POWERPC_64LE-NEXT: li r4, 4 +; POWERPC_64LE-NEXT: li r3, 0 +; POWERPC_64LE-NEXT: xvcmpeqsp vs0, vs0, vs1 +; POWERPC_64LE-NEXT: xxlnor v2, vs0, vs0 +; POWERPC_64LE-NEXT: vextuwrx r4, r4, v2 +; POWERPC_64LE-NEXT: vextuwrx r3, r3, v2 +; POWERPC_64LE-NEXT: rlwinm r4, r4, 1, 30, 30 +; POWERPC_64LE-NEXT: sub r3, r4, r3 +; POWERPC_64LE-NEXT: mfvsrwz r4, v2 +; POWERPC_64LE-NEXT: rlwinm r4, r4, 2, 29, 29 +; POWERPC_64LE-NEXT: or r3, r3, r4 +; POWERPC_64LE-NEXT: li r4, 12 +; POWERPC_64LE-NEXT: vextuwrx r4, r4, v2 +; POWERPC_64LE-NEXT: slwi r4, r4, 3 +; POWERPC_64LE-NEXT: or r3, r3, r4 +; POWERPC_64LE-NEXT: clrlwi r3, r3, 28 +; POWERPC_64LE-NEXT: stb r3, -1(r1) +; POWERPC_64LE-NEXT: lbz r3, -1(r1) +; POWERPC_64LE-NEXT: popcntd r3, r3 +; POWERPC_64LE-NEXT: blr +; +; POWERPC_64-LABEL: cols_needed: +; POWERPC_64: # %bb.0: # %entry +; POWERPC_64-NEXT: lxv vs0, 0(r3) +; POWERPC_64-NEXT: xxlxor vs1, vs1, vs1 +; POWERPC_64-NEXT: li r4, 8 +; POWERPC_64-NEXT: xvcmpeqsp vs0, vs0, vs1 +; POWERPC_64-NEXT: xxlnor v2, vs0, vs0 +; POWERPC_64-NEXT: vextuwlx r4, r4, v2 +; POWERPC_64-NEXT: mfvsrwz r3, v2 +; POWERPC_64-NEXT: rlwinm r4, r4, 1, 30, 30 +; POWERPC_64-NEXT: rlwimi r4, r3, 2, 29, 29 +; POWERPC_64-NEXT: li r3, 0 +; POWERPC_64-NEXT: vextuwlx r3, r3, v2 +; POWERPC_64-NEXT: rlwimi r4, r3, 3, 0, 28 +; POWERPC_64-NEXT: li r3, 12 +; POWERPC_64-NEXT: vextuwlx r3, r3, v2 +; POWERPC_64-NEXT: sub r3, r4, r3 +; POWERPC_64-NEXT: clrlwi r3, r3, 28 +; POWERPC_64-NEXT: stb r3, -1(r1) +; POWERPC_64-NEXT: lbz r3, -1(r1) +; POWERPC_64-NEXT: popcntd r3, r3 +; POWERPC_64-NEXT: blr +; +; POWERPC_32-LABEL: cols_needed: +; POWERPC_32: # %bb.0: # %entry +; POWERPC_32-NEXT: lxv vs0, 0(r3) +; POWERPC_32-NEXT: xxlxor vs1, vs1, vs1 +; POWERPC_32-NEXT: xvcmpeqsp vs0, vs0, vs1 +; POWERPC_32-NEXT: xxlnor vs0, vs0, vs0 +; POWERPC_32-NEXT: stxv vs0, -32(r1) +; POWERPC_32-NEXT: lwz r3, -24(r1) +; POWERPC_32-NEXT: lwz r4, -28(r1) +; POWERPC_32-NEXT: rlwinm r3, r3, 1, 30, 30 +; POWERPC_32-NEXT: rlwimi r3, r4, 2, 29, 29 +; POWERPC_32-NEXT: lwz r4, -32(r1) +; POWERPC_32-NEXT: rlwimi r3, r4, 3, 0, 28 +; POWERPC_32-NEXT: lwz r4, -20(r1) +; POWERPC_32-NEXT: sub r3, r3, r4 +; POWERPC_32-NEXT: clrlwi r3, r3, 28 +; POWERPC_32-NEXT: popcntw r3, r3 +; POWERPC_32-NEXT: blr +entry: + %0 = load <4 x float>, ptr %colauths, align 4, !tbaa !5 + %1 = fcmp une <4 x float> %0, zeroinitializer + %2 = bitcast <4 x i1> %1 to i4 + %3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2) + %4 = zext nneg i4 %3 to i32 + ret i32 %4 +} diff --git a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll index e71f59c..cad684e 100644 --- a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll +++ b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll @@ -325,24 +325,21 @@ define float @sqrt_afn_ieee(float %x) #0 { ; ; GLOBAL-LABEL: sqrt_afn_ieee: ; GLOBAL: # %bb.0: -; GLOBAL-NEXT: addis 3, 2, .LCPI11_1@toc@ha -; GLOBAL-NEXT: xsabsdp 0, 1 -; GLOBAL-NEXT: lfs 2, .LCPI11_1@toc@l(3) -; GLOBAL-NEXT: fcmpu 0, 0, 2 -; GLOBAL-NEXT: xxlxor 0, 0, 0 -; GLOBAL-NEXT: blt 0, .LBB11_2 -; GLOBAL-NEXT: # %bb.1: ; GLOBAL-NEXT: xsrsqrtesp 0, 1 ; GLOBAL-NEXT: vspltisw 2, -3 ; GLOBAL-NEXT: addis 3, 2, .LCPI11_0@toc@ha -; GLOBAL-NEXT: xvcvsxwdp 2, 34 -; GLOBAL-NEXT: xsmulsp 1, 1, 0 -; GLOBAL-NEXT: xsmaddasp 2, 1, 0 +; GLOBAL-NEXT: xvcvsxwdp 3, 34 +; GLOBAL-NEXT: xsmulsp 2, 1, 0 +; GLOBAL-NEXT: xsabsdp 1, 1 +; GLOBAL-NEXT: xsmaddasp 3, 2, 0 ; GLOBAL-NEXT: lfs 0, .LCPI11_0@toc@l(3) -; GLOBAL-NEXT: xsmulsp 0, 1, 0 -; 
GLOBAL-NEXT: xsmulsp 0, 0, 2 -; GLOBAL-NEXT: .LBB11_2: -; GLOBAL-NEXT: fmr 1, 0 +; GLOBAL-NEXT: addis 3, 2, .LCPI11_1@toc@ha +; GLOBAL-NEXT: xsmulsp 0, 2, 0 +; GLOBAL-NEXT: lfs 2, .LCPI11_1@toc@l(3) +; GLOBAL-NEXT: xssubsp 1, 1, 2 +; GLOBAL-NEXT: xxlxor 2, 2, 2 +; GLOBAL-NEXT: xsmulsp 0, 0, 3 +; GLOBAL-NEXT: fsel 1, 1, 0, 2 ; GLOBAL-NEXT: blr %rt = call afn ninf float @llvm.sqrt.f32(float %x) ret float %rt @@ -393,21 +390,19 @@ define float @sqrt_afn_preserve_sign(float %x) #1 { ; ; GLOBAL-LABEL: sqrt_afn_preserve_sign: ; GLOBAL: # %bb.0: -; GLOBAL-NEXT: xxlxor 0, 0, 0 -; GLOBAL-NEXT: fcmpu 0, 1, 0 -; GLOBAL-NEXT: beq 0, .LBB13_2 -; GLOBAL-NEXT: # %bb.1: ; GLOBAL-NEXT: xsrsqrtesp 0, 1 ; GLOBAL-NEXT: vspltisw 2, -3 ; GLOBAL-NEXT: addis 3, 2, .LCPI13_0@toc@ha -; GLOBAL-NEXT: xvcvsxwdp 2, 34 -; GLOBAL-NEXT: xsmulsp 1, 1, 0 -; GLOBAL-NEXT: xsmaddasp 2, 1, 0 +; GLOBAL-NEXT: xvcvsxwdp 3, 34 +; GLOBAL-NEXT: xsmulsp 2, 1, 0 +; GLOBAL-NEXT: xsmaddasp 3, 2, 0 ; GLOBAL-NEXT: lfs 0, .LCPI13_0@toc@l(3) -; GLOBAL-NEXT: xsmulsp 0, 1, 0 -; GLOBAL-NEXT: xsmulsp 0, 0, 2 -; GLOBAL-NEXT: .LBB13_2: -; GLOBAL-NEXT: fmr 1, 0 +; GLOBAL-NEXT: xsmulsp 0, 2, 0 +; GLOBAL-NEXT: xxlxor 2, 2, 2 +; GLOBAL-NEXT: xsmulsp 0, 0, 3 +; GLOBAL-NEXT: fsel 2, 1, 2, 0 +; GLOBAL-NEXT: xsnegdp 1, 1 +; GLOBAL-NEXT: fsel 1, 1, 2, 0 ; GLOBAL-NEXT: blr %rt = call afn ninf float @llvm.sqrt.f32(float %x) ret float %rt @@ -462,24 +457,21 @@ define float @sqrt_fast_ieee(float %x) #0 { ; ; GLOBAL-LABEL: sqrt_fast_ieee: ; GLOBAL: # %bb.0: -; GLOBAL-NEXT: addis 3, 2, .LCPI15_1@toc@ha -; GLOBAL-NEXT: xsabsdp 0, 1 -; GLOBAL-NEXT: lfs 2, .LCPI15_1@toc@l(3) -; GLOBAL-NEXT: fcmpu 0, 0, 2 -; GLOBAL-NEXT: xxlxor 0, 0, 0 -; GLOBAL-NEXT: blt 0, .LBB15_2 -; GLOBAL-NEXT: # %bb.1: ; GLOBAL-NEXT: xsrsqrtesp 0, 1 ; GLOBAL-NEXT: vspltisw 2, -3 ; GLOBAL-NEXT: addis 3, 2, .LCPI15_0@toc@ha -; GLOBAL-NEXT: xvcvsxwdp 2, 34 -; GLOBAL-NEXT: xsmulsp 1, 1, 0 -; GLOBAL-NEXT: xsmaddasp 2, 1, 0 +; GLOBAL-NEXT: xvcvsxwdp 3, 34 +; GLOBAL-NEXT: xsmulsp 2, 1, 0 +; GLOBAL-NEXT: xsabsdp 1, 1 +; GLOBAL-NEXT: xsmaddasp 3, 2, 0 ; GLOBAL-NEXT: lfs 0, .LCPI15_0@toc@l(3) -; GLOBAL-NEXT: xsmulsp 0, 1, 0 -; GLOBAL-NEXT: xsmulsp 0, 0, 2 -; GLOBAL-NEXT: .LBB15_2: -; GLOBAL-NEXT: fmr 1, 0 +; GLOBAL-NEXT: addis 3, 2, .LCPI15_1@toc@ha +; GLOBAL-NEXT: xsmulsp 0, 2, 0 +; GLOBAL-NEXT: lfs 2, .LCPI15_1@toc@l(3) +; GLOBAL-NEXT: xssubsp 1, 1, 2 +; GLOBAL-NEXT: xxlxor 2, 2, 2 +; GLOBAL-NEXT: xsmulsp 0, 0, 3 +; GLOBAL-NEXT: fsel 1, 1, 0, 2 ; GLOBAL-NEXT: blr %rt = call contract reassoc afn ninf float @llvm.sqrt.f32(float %x) ret float %rt @@ -517,21 +509,19 @@ define float @sqrt_fast_preserve_sign(float %x) #1 { ; ; GLOBAL-LABEL: sqrt_fast_preserve_sign: ; GLOBAL: # %bb.0: -; GLOBAL-NEXT: xxlxor 0, 0, 0 -; GLOBAL-NEXT: fcmpu 0, 1, 0 -; GLOBAL-NEXT: beq 0, .LBB16_2 -; GLOBAL-NEXT: # %bb.1: ; GLOBAL-NEXT: xsrsqrtesp 0, 1 ; GLOBAL-NEXT: vspltisw 2, -3 ; GLOBAL-NEXT: addis 3, 2, .LCPI16_0@toc@ha -; GLOBAL-NEXT: xvcvsxwdp 2, 34 -; GLOBAL-NEXT: xsmulsp 1, 1, 0 -; GLOBAL-NEXT: xsmaddasp 2, 1, 0 +; GLOBAL-NEXT: xvcvsxwdp 3, 34 +; GLOBAL-NEXT: xsmulsp 2, 1, 0 +; GLOBAL-NEXT: xsmaddasp 3, 2, 0 ; GLOBAL-NEXT: lfs 0, .LCPI16_0@toc@l(3) -; GLOBAL-NEXT: xsmulsp 0, 1, 0 -; GLOBAL-NEXT: xsmulsp 0, 0, 2 -; GLOBAL-NEXT: .LBB16_2: -; GLOBAL-NEXT: fmr 1, 0 +; GLOBAL-NEXT: xsmulsp 0, 2, 0 +; GLOBAL-NEXT: xxlxor 2, 2, 2 +; GLOBAL-NEXT: xsmulsp 0, 0, 3 +; GLOBAL-NEXT: fsel 2, 1, 2, 0 +; GLOBAL-NEXT: xsnegdp 1, 1 +; GLOBAL-NEXT: fsel 1, 1, 2, 0 ; GLOBAL-NEXT: blr %rt = call contract reassoc ninf afn float 
@llvm.sqrt.f32(float %x) ret float %rt diff --git a/llvm/test/CodeGen/PowerPC/lxvkq-vec-constant.ll b/llvm/test/CodeGen/PowerPC/lxvkq-vec-constant.ll new file mode 100644 index 0000000..0ee4524 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/lxvkq-vec-constant.ll @@ -0,0 +1,307 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 + +; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC64-LE-10 + +; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64-unknown-unknown \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC64-BE-10 + +; Test LXVKQ instruction generation for special vector constants matching 128 bit patterns: +; 0x8000_0000_0000_0000_0000_0000_0000_0000 (MSB set pattern) +; 0x0000_0000_0000_0000_0000_0000_0000_0001 (LSB set pattern) + +; ============================================================================= +; v2i64 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000) +; ============================================================================= + +; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-9223372036854775808, 0> +define dso_local noundef <2 x i64> @test_v2i64_msb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v2i64_msb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI0_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v2i64_msb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: lxvkq v2, 16 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <2 x i64> <i64 -9223372036854775808, i64 0> +} + +; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, -9223372036854775808> +define dso_local noundef <2 x i64> @test_v2i64_msb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v2i64_msb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: lxvkq v2, 16 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v2i64_msb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI1_0@toc@ha +; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI1_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <2 x i64> <i64 0, i64 -9223372036854775808> +} + +; ============================================================================= +; v4i32 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000) +; ============================================================================= + +; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-2147483648, 0, 0, 0> +define dso_local noundef <4 x i32> @test_v4i32_msb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v4i32_msb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI2_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v4i32_msb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: lxvkq v2, 16 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <4 x i32> <i32 -2147483648, i32 0, i32 0, i32 0> +} + +; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, 0, 0, -2147483648> +define dso_local noundef <4 x i32> @test_v4i32_msb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: 
test_v4i32_msb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: lxvkq v2, 16 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v4i32_msb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI3_0@toc@ha +; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI3_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <4 x i32> <i32 0, i32 0, i32 0, i32 -2147483648> +} + +; ============================================================================= +; v8i16 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000) +; ============================================================================= + +; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-32768, 0, 0, 0, 0, 0, 0, 0> +define dso_local noundef <8 x i16> @test_v8i16_msb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v8i16_msb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI4_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v8i16_msb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: lxvkq v2, 16 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <8 x i16> <i16 -32768, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0> +} + +; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, 0, 0, 0, 0, 0, 0, -32768> +define dso_local noundef <8 x i16> @test_v8i16_msb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v8i16_msb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: lxvkq v2, 16 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v8i16_msb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI5_0@toc@ha +; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI5_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 -32768> +} + +; ============================================================================= +; v16i8 tests - MSB set pattern (0x8000_0000_0000_0000_0000_0000_0000_0000) +; ============================================================================= + +; Big-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <-128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0> +define dso_local noundef <16 x i8> @test_v16i8_msb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v16i8_msb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI6_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v16i8_msb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: lxvkq v2, 16 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <16 x i8> <i8 -128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0> +} + +; Little-Endian: 0x8000_0000_0000_0000_0000_0000_0000_0000 represents <0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128> +define dso_local noundef <16 x i8> @test_v16i8_msb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v16i8_msb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: lxvkq v2, 16 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v16i8_msb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI7_0@toc@ha +; POWERPC64-BE-10-NEXT: addi 
r3, r3, .LCPI7_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 -128> +} + +; ============================================================================= +; v2i64 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001) +; ============================================================================= + +; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 1> +define dso_local noundef <2 x i64> @test_v2i64_lsb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v2i64_lsb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI8_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v2i64_lsb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: xxspltib v2, 255 +; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <2 x i64> <i64 0, i64 1> +} + +; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0> +define dso_local noundef <2 x i64> @test_v2i64_lsb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v2i64_lsb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: xxspltib v2, 255 +; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v2i64_lsb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI9_0@toc@ha +; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI9_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <2 x i64> <i64 1, i64 0> +} + +; ============================================================================= +; v4i32 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001) +; ============================================================================= + +; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 0, 0, 1> +define dso_local noundef <4 x i32> @test_v4i32_lsb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v4i32_lsb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI10_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v4i32_lsb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: xxspltib v2, 255 +; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <4 x i32> <i32 0, i32 0, i32 0, i32 1> +} + +; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0, 0, 0> +define dso_local noundef <4 x i32> @test_v4i32_lsb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v4i32_lsb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: xxspltib v2, 255 +; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v4i32_lsb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI11_0@toc@ha +; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI11_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <4 x i32> <i32 1, i32 0, i32 0, i32 0> +} + +; ============================================================================= +; v8i16 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001) +; 
============================================================================= + +; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 0, 0, 0, 0, 0, 0, 1> +define dso_local noundef <8 x i16> @test_v8i16_lsb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v8i16_lsb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI12_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v8i16_lsb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: xxspltib v2, 255 +; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 1> +} + +; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0, 0, 0, 0, 0, 0, 0> +define dso_local noundef <8 x i16> @test_v8i16_lsb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v8i16_lsb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: xxspltib v2, 255 +; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v8i16_lsb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI13_0@toc@ha +; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI13_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <8 x i16> <i16 1, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0> +} + +; ============================================================================= +; v16i8 tests - LSB set pattern (0x0000_0000_0000_0000_0000_0000_0000_0001) +; ============================================================================= + +; Big-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1> +define dso_local noundef <16 x i8> @test_v16i8_lsb_set_bigendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v16i8_lsb_set_bigendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: plxv v2, .LCPI14_0@PCREL(0), 1 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v16i8_lsb_set_bigendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: xxspltib v2, 255 +; POWERPC64-BE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-BE-10-NEXT: blr +entry: + ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 1> +} + +; Little-Endian: 0x0000_0000_0000_0000_0000_0000_0000_0001 represents <1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0> +define dso_local noundef <16 x i8> @test_v16i8_lsb_set_littleendian() local_unnamed_addr { +; POWERPC64-LE-10-LABEL: test_v16i8_lsb_set_littleendian: +; POWERPC64-LE-10: # %bb.0: # %entry +; POWERPC64-LE-10-NEXT: xxspltib v2, 255 +; POWERPC64-LE-10-NEXT: vsrq v2, v2, v2 +; POWERPC64-LE-10-NEXT: blr +; +; POWERPC64-BE-10-LABEL: test_v16i8_lsb_set_littleendian: +; POWERPC64-BE-10: # %bb.0: # %entry +; POWERPC64-BE-10-NEXT: addis r3, r2, .LCPI15_0@toc@ha +; POWERPC64-BE-10-NEXT: addi r3, r3, .LCPI15_0@toc@l +; POWERPC64-BE-10-NEXT: lxv v2, 0(r3) +; POWERPC64-BE-10-NEXT: blr +entry: + ret <16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0> +}
\ No newline at end of file diff --git a/llvm/test/CodeGen/PowerPC/vector-all-ones.ll b/llvm/test/CodeGen/PowerPC/vector-all-ones.ll deleted file mode 100644 index e4c93adc..0000000 --- a/llvm/test/CodeGen/PowerPC/vector-all-ones.ll +++ /dev/null @@ -1,23 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 -; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \ -; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s - -; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \ -; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s - -; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc-ibm-aix \ -; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s - -; Currently the generated code uses `vspltisw` to generate vector of 1s followed by add operation. -; This pattern is expected to be optimized in a future patch by using `xxleqv` to generate vector of -1s -; followed by subtraction operation. -define dso_local noundef <4 x i32> @test1(<4 x i32> %a) { -; CHECK-LABEL: test1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vspltisw v3, 1 -; CHECK-NEXT: vadduwm v2, v2, v3 -; CHECK-NEXT: blr -entry: - %add = add <4 x i32> %a, splat (i32 1) - ret <4 x i32> %add -} diff --git a/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll b/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll index 0892210..d506d20 100644 --- a/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll +++ b/llvm/test/CodeGen/PowerPC/vector-reduce-add.ll @@ -1566,12 +1566,16 @@ define dso_local i64 @v16i8tov16i64_sign(<16 x i8> %a) local_unnamed_addr #0 { ; PWR10BE-LABEL: v16i8tov16i64_sign: ; PWR10BE: # %bb.0: # %entry ; PWR10BE-NEXT: addis r3, r2, .LCPI23_0@toc@ha +; PWR10BE-NEXT: xxspltib v1, 255 ; PWR10BE-NEXT: addi r3, r3, .LCPI23_0@toc@l +; PWR10BE-NEXT: vsrq v1, v1, v1 ; PWR10BE-NEXT: lxv v3, 0(r3) ; PWR10BE-NEXT: addis r3, r2, .LCPI23_1@toc@ha ; PWR10BE-NEXT: addi r3, r3, .LCPI23_1@toc@l +; PWR10BE-NEXT: vperm v1, v2, v2, v1 ; PWR10BE-NEXT: lxv v4, 0(r3) ; PWR10BE-NEXT: addis r3, r2, .LCPI23_2@toc@ha +; PWR10BE-NEXT: vextsb2d v1, v1 ; PWR10BE-NEXT: vperm v3, v2, v2, v3 ; PWR10BE-NEXT: addi r3, r3, .LCPI23_2@toc@l ; PWR10BE-NEXT: vextsb2d v3, v3 @@ -1585,23 +1589,18 @@ define dso_local i64 @v16i8tov16i64_sign(<16 x i8> %a) local_unnamed_addr #0 { ; PWR10BE-NEXT: vperm v5, v2, v2, v5 ; PWR10BE-NEXT: addi r3, r3, .LCPI23_4@toc@l ; PWR10BE-NEXT: vextsb2d v5, v5 -; PWR10BE-NEXT: lxv v1, 0(r3) +; PWR10BE-NEXT: lxv v6, 0(r3) ; PWR10BE-NEXT: addis r3, r2, .LCPI23_5@toc@ha ; PWR10BE-NEXT: vperm v0, v2, v2, v0 ; PWR10BE-NEXT: addi r3, r3, .LCPI23_5@toc@l ; PWR10BE-NEXT: vextsb2d v0, v0 -; PWR10BE-NEXT: lxv v6, 0(r3) +; PWR10BE-NEXT: lxv v7, 0(r3) ; PWR10BE-NEXT: addis r3, r2, .LCPI23_6@toc@ha -; PWR10BE-NEXT: vperm v1, v2, v2, v1 +; PWR10BE-NEXT: vperm v6, v2, v2, v6 ; PWR10BE-NEXT: vaddudm v5, v0, v5 ; PWR10BE-NEXT: vaddudm v3, v4, v3 ; PWR10BE-NEXT: vaddudm v3, v3, v5 ; PWR10BE-NEXT: addi r3, r3, .LCPI23_6@toc@l -; PWR10BE-NEXT: vextsb2d v1, v1 -; PWR10BE-NEXT: lxv v7, 0(r3) -; PWR10BE-NEXT: addis r3, r2, .LCPI23_7@toc@ha -; PWR10BE-NEXT: vperm v6, v2, v2, v6 -; PWR10BE-NEXT: addi r3, r3, .LCPI23_7@toc@l ; PWR10BE-NEXT: vextsb2d v6, v6 ; PWR10BE-NEXT: lxv v8, 0(r3) ; PWR10BE-NEXT: vperm v7, v2, v2, v7 @@ -1609,7 +1608,7 @@ define dso_local i64 @v16i8tov16i64_sign(<16 x i8> %a) local_unnamed_addr #0 { ; PWR10BE-NEXT: vperm v2, v2, v2, v8 ; PWR10BE-NEXT: vextsb2d v2, v2 ; 
PWR10BE-NEXT: vaddudm v2, v2, v7 -; PWR10BE-NEXT: vaddudm v4, v6, v1 +; PWR10BE-NEXT: vaddudm v4, v1, v6 ; PWR10BE-NEXT: vaddudm v2, v4, v2 ; PWR10BE-NEXT: vaddudm v2, v2, v3 ; PWR10BE-NEXT: xxswapd v3, v2 diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll index 24a1724..ba7680b 100644 --- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll +++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-eqv.ll @@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_or_BC_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_or_BC_eqv_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 151 ; CHECK-NEXT: blr entry: %or = or <4 x i32> %B, %C @@ -34,12 +32,10 @@ define <2 x i64> @ternary_A_or_BC_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_or_BC_eqv_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 151 ; CHECK-NEXT: blr entry: %or = or <2 x i64> %B, %C @@ -54,11 +50,9 @@ define <16 x i8> @ternary_A_or_BC_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_or_BC_eqv_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 151 ; CHECK-NEXT: blr entry: %or = or <16 x i8> %B, %C @@ -73,11 +67,9 @@ define <8 x i16> @ternary_A_or_BC_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_or_BC_eqv_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 151 ; CHECK-NEXT: blr entry: %or = or <8 x i16> %B, %C @@ -92,11 +84,9 @@ define <4 x i32> @ternary_A_nor_BC_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i ; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 152 ; CHECK-NEXT: blr entry: %or = or <4 x i32> %B, %C @@ -112,12 +102,10 @@ define <2 x i64> @ternary_A_nor_BC_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i ; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 152 ; CHECK-NEXT: blr entry: %or = or <2 x i64> %B, %C @@ -133,11 +121,9 @@ define <16 x i8> @ternary_A_nor_BC_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v3, v4 -; 
CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 152 ; CHECK-NEXT: blr entry: %or = or <16 x i8> %B, %C @@ -153,11 +139,9 @@ define <8 x i16> @ternary_A_nor_BC_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i ; CHECK-LABEL: ternary_A_nor_BC_eqv_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 152 ; CHECK-NEXT: blr entry: %or = or <8 x i16> %B, %C @@ -173,10 +157,9 @@ define <4 x i32> @ternary_A_not_C_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_not_C_eqv_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v4, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxeval v2, v2, vs0, v3, 99 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 154 ; CHECK-NEXT: blr entry: %not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation @@ -191,12 +174,10 @@ define <2 x i64> @ternary_A_not_C_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_not_C_eqv_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v4, v4 -; CHECK-NEXT: xxleqv vs1, v4, v3 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 154 ; CHECK-NEXT: blr entry: %not = xor <2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation @@ -211,11 +192,9 @@ define <16 x i8> @ternary_A_not_C_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_not_C_eqv_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v4, v4 -; CHECK-NEXT: xxleqv vs1, v4, v3 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 154 ; CHECK-NEXT: blr entry: %not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation @@ -230,11 +209,9 @@ define <8 x i16> @ternary_A_not_C_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_not_C_eqv_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v4, v4 -; CHECK-NEXT: xxleqv vs1, v4, v3 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 154 ; CHECK-NEXT: blr entry: %not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation @@ -249,11 +226,9 @@ define <4 x i32> @ternary_A_nand_BC_eqv_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x ; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 158 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -269,12 +244,10 @@ define <2 x i64> @ternary_A_nand_BC_eqv_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x ; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; 
CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 158 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -290,11 +263,9 @@ define <16 x i8> @ternary_A_nand_BC_eqv_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 ; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 158 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -310,11 +281,9 @@ define <8 x i16> @ternary_A_nand_BC_eqv_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x ; CHECK-LABEL: ternary_A_nand_BC_eqv_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxleqv vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 158 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll index 7a6733d3..067b089 100644 --- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll +++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nand.ll @@ -15,10 +15,9 @@ define <4 x i32> @ternary_A_B_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> ; CHECK-LABEL: ternary_A_B_nand_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 227 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -32,11 +31,10 @@ define <2 x i64> @ternary_A_B_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> ; CHECK-LABEL: ternary_A_B_nand_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 227 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -50,10 +48,9 @@ define <16 x i8> @ternary_A_B_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8> ; CHECK-LABEL: ternary_A_B_nand_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 227 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -67,10 +64,9 @@ define <8 x i16> @ternary_A_B_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> ; CHECK-LABEL: ternary_A_B_nand_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 227 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C @@ -84,10 +80,9 @@ define <4 x i32> @ternary_A_C_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> ; CHECK-LABEL: ternary_A_C_nand_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: 
xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 229 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -101,11 +96,10 @@ define <2 x i64> @ternary_A_C_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> ; CHECK-LABEL: ternary_A_C_nand_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 229 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -119,10 +113,9 @@ define <16 x i8> @ternary_A_C_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8> ; CHECK-LABEL: ternary_A_C_nand_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 229 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -136,10 +129,9 @@ define <8 x i16> @ternary_A_C_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> ; CHECK-LABEL: ternary_A_C_nand_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnand vs0, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 229 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C @@ -153,11 +145,9 @@ define <4 x i32> @ternary_A_xor_BC_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x ; CHECK-LABEL: ternary_A_xor_BC_nand_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 230 ; CHECK-NEXT: blr entry: %xor = xor <4 x i32> %B, %C @@ -172,12 +162,10 @@ define <2 x i64> @ternary_A_xor_BC_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x ; CHECK-LABEL: ternary_A_xor_BC_nand_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 230 ; CHECK-NEXT: blr entry: %xor = xor <2 x i64> %B, %C @@ -192,11 +180,9 @@ define <16 x i8> @ternary_A_xor_BC_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 ; CHECK-LABEL: ternary_A_xor_BC_nand_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 230 ; CHECK-NEXT: blr entry: %xor = xor <16 x i8> %B, %C @@ -211,11 +197,9 @@ define <8 x i16> @ternary_A_xor_BC_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x ; CHECK-LABEL: ternary_A_xor_BC_nand_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 230 ; CHECK-NEXT: blr entry: %xor = xor <8 x i16> %B, %C @@ -230,11 +214,9 @@ define <4 x i32> @ternary_A_or_BC_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i ; CHECK-LABEL: ternary_A_or_BC_nand_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, 
v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 231 ; CHECK-NEXT: blr entry: %or = or <4 x i32> %B, %C @@ -249,12 +231,10 @@ define <2 x i64> @ternary_A_or_BC_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i ; CHECK-LABEL: ternary_A_or_BC_nand_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 231 ; CHECK-NEXT: blr entry: %or = or <2 x i64> %B, %C @@ -269,11 +249,9 @@ define <16 x i8> @ternary_A_or_BC_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_or_BC_nand_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 231 ; CHECK-NEXT: blr entry: %or = or <16 x i8> %B, %C @@ -288,11 +266,9 @@ define <8 x i16> @ternary_A_or_BC_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i ; CHECK-LABEL: ternary_A_or_BC_nand_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 231 ; CHECK-NEXT: blr entry: %or = or <8 x i16> %B, %C @@ -307,11 +283,9 @@ define <4 x i32> @ternary_A_eqv_BC_nand_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x ; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxleqv vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 233 ; CHECK-NEXT: blr entry: %xor = xor <4 x i32> %B, %C @@ -327,12 +301,10 @@ define <2 x i64> @ternary_A_eqv_BC_nand_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x ; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxleqv vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 233 ; CHECK-NEXT: blr entry: %xor = xor <2 x i64> %B, %C @@ -348,11 +320,9 @@ define <16 x i8> @ternary_A_eqv_BC_nand_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 ; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxleqv vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 233 ; CHECK-NEXT: blr entry: %xor = xor <16 x i8> %B, %C @@ -368,11 +338,9 @@ define <8 x i16> @ternary_A_eqv_BC_nand_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x ; CHECK-LABEL: ternary_A_eqv_BC_nand_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxleqv vs0, v3, v4 -; CHECK-NEXT: xxlnand vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 
233 ; CHECK-NEXT: blr entry: %xor = xor <8 x i16> %B, %C diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll index d635952..3695874 100644 --- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll +++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-nor.ll @@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_and_BC_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i ; CHECK-LABEL: ternary_A_and_BC_nor_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 129 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -34,12 +32,10 @@ define <2 x i64> @ternary_A_and_BC_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i ; CHECK-LABEL: ternary_A_and_BC_nor_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 129 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -54,11 +50,9 @@ define <16 x i8> @ternary_A_and_BC_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_and_BC_nor_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 129 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -73,11 +67,9 @@ define <8 x i16> @ternary_A_and_BC_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i ; CHECK-LABEL: ternary_A_and_BC_nor_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 129 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C @@ -92,10 +84,9 @@ define <4 x i32> @ternary_A_B_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> % ; CHECK-LABEL: ternary_A_B_nor_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 131 ; CHECK-NEXT: blr entry: %or = or <4 x i32> %B, %C @@ -109,11 +100,10 @@ define <2 x i64> @ternary_A_B_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> % ; CHECK-LABEL: ternary_A_B_nor_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 131 ; CHECK-NEXT: blr entry: %or = or <2 x i64> %B, %C @@ -127,10 +117,9 @@ define <16 x i8> @ternary_A_B_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8> ; CHECK-LABEL: ternary_A_B_nor_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 131 ; CHECK-NEXT: blr entry: %or = or <16 x i8> %B, %C @@ -144,10 +133,9 @@ define <8 
x i16> @ternary_A_B_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> % ; CHECK-LABEL: ternary_A_B_nor_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 131 ; CHECK-NEXT: blr entry: %or = or <8 x i16> %B, %C @@ -161,10 +149,9 @@ define <4 x i32> @ternary_A_C_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> % ; CHECK-LABEL: ternary_A_C_nor_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 133 ; CHECK-NEXT: blr entry: %or = or <4 x i32> %B, %C @@ -178,11 +165,10 @@ define <2 x i64> @ternary_A_C_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> % ; CHECK-LABEL: ternary_A_C_nor_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 133 ; CHECK-NEXT: blr entry: %or = or <2 x i64> %B, %C @@ -196,10 +182,9 @@ define <16 x i8> @ternary_A_C_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8> ; CHECK-LABEL: ternary_A_C_nor_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 133 ; CHECK-NEXT: blr entry: %or = or <16 x i8> %B, %C @@ -213,10 +198,9 @@ define <8 x i16> @ternary_A_C_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> % ; CHECK-LABEL: ternary_A_C_nor_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v4, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 133 ; CHECK-NEXT: blr entry: %or = or <8 x i16> %B, %C @@ -230,11 +214,9 @@ define <4 x i32> @ternary_A_xor_BC_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i ; CHECK-LABEL: ternary_A_xor_BC_nor_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 134 ; CHECK-NEXT: blr entry: %xor = xor <4 x i32> %B, %C @@ -249,12 +231,10 @@ define <2 x i64> @ternary_A_xor_BC_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i ; CHECK-LABEL: ternary_A_xor_BC_nor_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 134 ; CHECK-NEXT: blr entry: %xor = xor <2 x i64> %B, %C @@ -269,11 +249,9 @@ define <16 x i8> @ternary_A_xor_BC_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_xor_BC_nor_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 134 ; CHECK-NEXT: blr entry: %xor = 
xor <16 x i8> %B, %C @@ -288,11 +266,9 @@ define <8 x i16> @ternary_A_xor_BC_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i ; CHECK-LABEL: ternary_A_xor_BC_nor_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 134 ; CHECK-NEXT: blr entry: %xor = xor <8 x i16> %B, %C @@ -307,11 +283,9 @@ define <4 x i32> @ternary_A_not_C_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_not_C_nor_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v4, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 138 ; CHECK-NEXT: blr entry: %not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation @@ -326,12 +300,10 @@ define <2 x i64> @ternary_A_not_C_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_not_C_nor_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v4, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 138 ; CHECK-NEXT: blr entry: %not = xor <2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation @@ -346,11 +318,9 @@ define <16 x i8> @ternary_A_not_C_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_not_C_nor_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v4, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 138 ; CHECK-NEXT: blr entry: %not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation @@ -365,11 +335,9 @@ define <8 x i16> @ternary_A_not_C_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_not_C_nor_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v4, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 138 ; CHECK-NEXT: blr entry: %not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation @@ -384,11 +352,9 @@ define <4 x i32> @ternary_A_not_B_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_not_B_nor_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 140 ; CHECK-NEXT: blr entry: %not = xor <4 x i32> %B, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation @@ -403,12 +369,10 @@ define <2 x i64> @ternary_A_not_B_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_not_B_nor_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, 
v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 140 ; CHECK-NEXT: blr entry: %not = xor <2 x i64> %B, <i64 -1, i64 -1> ; Vector not operation @@ -423,11 +387,9 @@ define <16 x i8> @ternary_A_not_B_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_not_B_nor_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 140 ; CHECK-NEXT: blr entry: %not = xor <16 x i8> %B, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation @@ -442,11 +404,9 @@ define <8 x i16> @ternary_A_not_B_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_not_B_nor_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 140 ; CHECK-NEXT: blr entry: %not = xor <8 x i16> %B, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation @@ -461,11 +421,9 @@ define <4 x i32> @ternary_A_nand_BC_nor_BC_4x32(<4 x i1> %A, <4 x i32> %B, <4 x ; CHECK-LABEL: ternary_A_nand_BC_nor_BC_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 142 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -481,12 +439,10 @@ define <2 x i64> @ternary_A_nand_BC_nor_BC_2x64(<2 x i1> %A, <2 x i64> %B, <2 x ; CHECK-LABEL: ternary_A_nand_BC_nor_BC_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 142 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -502,11 +458,9 @@ define <16 x i8> @ternary_A_nand_BC_nor_BC_16x8(<16 x i1> %A, <16 x i8> %B, <16 ; CHECK-LABEL: ternary_A_nand_BC_nor_BC_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 142 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -522,11 +476,9 @@ define <8 x i16> @ternary_A_nand_BC_nor_BC_8x16(<8 x i1> %A, <8 x i16> %B, <8 x ; CHECK-LABEL: ternary_A_nand_BC_nor_BC_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 142 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll index 6203a96..a67d9cf 100644 --- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll +++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-b.ll @@ -15,11 +15,9 @@ 
define <4 x i32> @ternary_A_and_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_and_BC_not_B_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 193 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -33,12 +31,10 @@ define <2 x i64> @ternary_A_and_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_and_BC_not_B_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 193 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -52,11 +48,9 @@ define <16 x i8> @ternary_A_and_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_and_BC_not_B_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 193 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -70,11 +64,9 @@ define <8 x i16> @ternary_A_and_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_and_BC_not_B_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 193 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C @@ -88,11 +80,9 @@ define <4 x i32> @ternary_A_xor_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_xor_BC_not_B_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 198 ; CHECK-NEXT: blr entry: %xor = xor <4 x i32> %B, %C @@ -106,12 +96,10 @@ define <2 x i64> @ternary_A_xor_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_xor_BC_not_B_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 198 ; CHECK-NEXT: blr entry: %xor = xor <2 x i64> %B, %C @@ -125,11 +113,9 @@ define <16 x i8> @ternary_A_xor_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_xor_BC_not_B_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 198 ; CHECK-NEXT: blr entry: %xor = xor <16 x i8> %B, %C @@ -143,11 +129,9 @@ define <8 x i16> @ternary_A_xor_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_xor_BC_not_B_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; 
CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 198 ; CHECK-NEXT: blr entry: %xor = xor <8 x i16> %B, %C @@ -161,11 +145,9 @@ define <4 x i32> @ternary_A_or_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32 ; CHECK-LABEL: ternary_A_or_BC_not_B_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 199 ; CHECK-NEXT: blr entry: %or = or <4 x i32> %B, %C @@ -179,12 +161,10 @@ define <2 x i64> @ternary_A_or_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64 ; CHECK-LABEL: ternary_A_or_BC_not_B_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 199 ; CHECK-NEXT: blr entry: %or = or <2 x i64> %B, %C @@ -198,11 +178,9 @@ define <16 x i8> @ternary_A_or_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i ; CHECK-LABEL: ternary_A_or_BC_not_B_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 199 ; CHECK-NEXT: blr entry: %or = or <16 x i8> %B, %C @@ -216,11 +194,9 @@ define <8 x i16> @ternary_A_or_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16 ; CHECK-LABEL: ternary_A_or_BC_not_B_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 199 ; CHECK-NEXT: blr entry: %or = or <8 x i16> %B, %C @@ -234,11 +210,9 @@ define <4 x i32> @ternary_A_nand_BC_not_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i ; CHECK-LABEL: ternary_A_nand_BC_not_B_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 206 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -253,12 +227,10 @@ define <2 x i64> @ternary_A_nand_BC_not_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i ; CHECK-LABEL: ternary_A_nand_BC_not_B_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 206 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -273,11 +245,9 @@ define <16 x i8> @ternary_A_nand_BC_not_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_nand_BC_not_B_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 206 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C 
@@ -292,11 +262,9 @@ define <8 x i16> @ternary_A_nand_BC_not_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i ; CHECK-LABEL: ternary_A_nand_BC_not_B_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v3, v3 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 206 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll index 3479d94..98c1f28 100644 --- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll +++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-not-c.ll @@ -15,11 +15,9 @@ define <4 x i32> @ternary_A_and_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_and_BC_not_C_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 161 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -33,12 +31,10 @@ define <2 x i64> @ternary_A_and_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_and_BC_not_C_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 161 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -52,11 +48,9 @@ define <16 x i8> @ternary_A_and_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_and_BC_not_C_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 161 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -70,11 +64,9 @@ define <8 x i16> @ternary_A_and_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_and_BC_not_C_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxland vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 161 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C @@ -88,10 +80,9 @@ define <4 x i32> @ternary_A_B_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %C ; CHECK-LABEL: ternary_A_B_not_C_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v4, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 163 ; CHECK-NEXT: blr entry: %not = xor <4 x i32> %C, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation @@ -104,11 +95,10 @@ define <2 x i64> @ternary_A_B_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %C ; CHECK-LABEL: ternary_A_B_not_C_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v4, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 163 ; CHECK-NEXT: blr entry: %not = xor 
<2 x i64> %C, <i64 -1, i64 -1> ; Vector not operation @@ -121,10 +111,9 @@ define <16 x i8> @ternary_A_B_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8> % ; CHECK-LABEL: ternary_A_B_not_C_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v4, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 163 ; CHECK-NEXT: blr entry: %not = xor <16 x i8> %C, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation @@ -137,10 +126,9 @@ define <8 x i16> @ternary_A_B_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %C ; CHECK-LABEL: ternary_A_B_not_C_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v4, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs0, v3, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 163 ; CHECK-NEXT: blr entry: %not = xor <8 x i16> %C, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> ; Vector not operation @@ -153,11 +141,9 @@ define <4 x i32> @ternary_A_xor_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i3 ; CHECK-LABEL: ternary_A_xor_BC_not_C_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 166 ; CHECK-NEXT: blr entry: %xor = xor <4 x i32> %B, %C @@ -171,12 +157,10 @@ define <2 x i64> @ternary_A_xor_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i6 ; CHECK-LABEL: ternary_A_xor_BC_not_C_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 166 ; CHECK-NEXT: blr entry: %xor = xor <2 x i64> %B, %C @@ -190,11 +174,9 @@ define <16 x i8> @ternary_A_xor_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_xor_BC_not_C_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 166 ; CHECK-NEXT: blr entry: %xor = xor <16 x i8> %B, %C @@ -208,11 +190,9 @@ define <8 x i16> @ternary_A_xor_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i1 ; CHECK-LABEL: ternary_A_xor_BC_not_C_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlxor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 166 ; CHECK-NEXT: blr entry: %xor = xor <8 x i16> %B, %C @@ -226,11 +206,9 @@ define <4 x i32> @ternary_A_or_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32 ; CHECK-LABEL: ternary_A_or_BC_not_C_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 167 ; CHECK-NEXT: blr entry: %or = or <4 x i32> %B, %C @@ -244,12 +222,10 @@ define <2 x i64> 
@ternary_A_or_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64 ; CHECK-LABEL: ternary_A_or_BC_not_C_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 167 ; CHECK-NEXT: blr entry: %or = or <2 x i64> %B, %C @@ -263,11 +239,9 @@ define <16 x i8> @ternary_A_or_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i ; CHECK-LABEL: ternary_A_or_BC_not_C_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 167 ; CHECK-NEXT: blr entry: %or = or <16 x i8> %B, %C @@ -281,11 +255,9 @@ define <8 x i16> @ternary_A_or_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16 ; CHECK-LABEL: ternary_A_or_BC_not_C_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlor vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 167 ; CHECK-NEXT: blr entry: %or = or <8 x i16> %B, %C @@ -299,11 +271,9 @@ define <4 x i32> @ternary_A_not_B_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32 ; CHECK-LABEL: ternary_A_not_B_not_C_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 172 ; CHECK-NEXT: blr entry: %not_b = xor <4 x i32> %B, <i32 -1, i32 -1, i32 -1, i32 -1> ; Vector not operation @@ -317,12 +287,10 @@ define <2 x i64> @ternary_A_not_B_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64 ; CHECK-LABEL: ternary_A_not_B_not_C_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 172 ; CHECK-NEXT: blr entry: %not_b = xor <2 x i64> %B, <i64 -1, i64 -1> ; Vector not operation @@ -336,11 +304,9 @@ define <16 x i8> @ternary_A_not_B_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i ; CHECK-LABEL: ternary_A_not_B_not_C_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 172 ; CHECK-NEXT: blr entry: %not_b = xor <16 x i8> %B, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; Vector not operation @@ -354,11 +320,9 @@ define <8 x i16> @ternary_A_not_B_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16 ; CHECK-LABEL: ternary_A_not_B_not_C_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnor vs0, v3, v3 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 172 ; CHECK-NEXT: blr entry: %not_b = xor <8 x i16> %B, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, 
i16 -1, i16 -1, i16 -1> ; Vector not operation @@ -372,11 +336,9 @@ define <4 x i32> @ternary_A_nand_BC_not_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i ; CHECK-LABEL: ternary_A_nand_BC_not_C_4x32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxleqv v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslw v2, v2, v5 ; CHECK-NEXT: vsraw v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 174 ; CHECK-NEXT: blr entry: %and = and <4 x i32> %B, %C @@ -391,12 +353,10 @@ define <2 x i64> @ternary_A_nand_BC_not_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i ; CHECK-LABEL: ternary_A_nand_BC_not_C_2x64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxlxor v5, v5, v5 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: xxsplti32dx v5, 1, 63 ; CHECK-NEXT: vsld v2, v2, v5 ; CHECK-NEXT: vsrad v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 174 ; CHECK-NEXT: blr entry: %and = and <2 x i64> %B, %C @@ -411,11 +371,9 @@ define <16 x i8> @ternary_A_nand_BC_not_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x ; CHECK-LABEL: ternary_A_nand_BC_not_C_16x8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltib v5, 7 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslb v2, v2, v5 ; CHECK-NEXT: vsrab v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 174 ; CHECK-NEXT: blr entry: %and = and <16 x i8> %B, %C @@ -430,11 +388,9 @@ define <8 x i16> @ternary_A_nand_BC_not_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i ; CHECK-LABEL: ternary_A_nand_BC_not_C_8x16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxspltiw v5, 983055 -; CHECK-NEXT: xxlnand vs0, v3, v4 -; CHECK-NEXT: xxlnor vs1, v4, v4 ; CHECK-NEXT: vslh v2, v2, v5 ; CHECK-NEXT: vsrah v2, v2, v5 -; CHECK-NEXT: xxsel v2, vs1, vs0, v2 +; CHECK-NEXT: xxeval v2, v2, v3, v4, 174 ; CHECK-NEXT: blr entry: %and = and <8 x i16> %B, %C diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir index 2e500d5..da7546e 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir @@ -689,8 +689,8 @@ # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: G_INSERT_VECTOR_ELT (opcode {{[0-9]+}}): 3 type indices, 0 imm indices -# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined -# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected +# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: G_EXTRACT_VECTOR_ELT (opcode {{[0-9]+}}): 3 type indices, 0 imm indices # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT: .. 
imm index coverage check SKIPPED: no rules defined diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv32.mir new file mode 100644 index 0000000..d7c0e80 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv32.mir @@ -0,0 +1,1742 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s + +--- +name: insertelement_nxv1i1_0 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + ; CHECK-LABEL: name: insertelement_nxv1i1_0 + ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32) + ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>) + ; CHECK-NEXT: PseudoRET implicit $v0 + %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF + %2:_(s1) = G_CONSTANT i1 false + %3:_(s32) = G_CONSTANT i32 0 + %0:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32) + $v0 = COPY %0(<vscale x 1 x s1>) + PseudoRET implicit $v0 +... +--- +name: insertelement_nxv1i1_1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + ; CHECK-LABEL: name: insertelement_nxv1i1_1 + ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32) + ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>) + ; CHECK-NEXT: PseudoRET implicit $v0 + %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF + %2:_(s1) = G_CONSTANT i1 true + %3:_(s32) = G_CONSTANT i32 0 + %0:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32) + $v0 = COPY %0(<vscale x 1 x s1>) + PseudoRET implicit $v0 +... +--- +name: insertelement_nxv1i1_2 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; CHECK-LABEL: name: insertelement_nxv1i1_2 + ; CHECK: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32) + ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>) + ; CHECK-NEXT: PseudoRET implicit $v0 + %2:_(s32) = COPY $x10 + %0:_(s1) = G_TRUNC %2(s32) + %1:_(s32) = COPY $x11 + %4:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF + %3:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT %4, %0(s1), %1(s32) + $v0 = COPY %3(<vscale x 1 x s1>) + PseudoRET implicit $v0 +... 
+--- +name: insertelement_nxv2i1_0 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + ; CHECK-LABEL: name: insertelement_nxv2i1_0 + ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32) + ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>) + ; CHECK-NEXT: PseudoRET implicit $v0 + %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF + %2:_(s1) = G_CONSTANT i1 false + %3:_(s32) = G_CONSTANT i32 1 + %0:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32) + $v0 = COPY %0(<vscale x 2 x s1>) + PseudoRET implicit $v0 +... +--- +name: insertelement_nxv2i1_1 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + ; CHECK-LABEL: name: insertelement_nxv2i1_1 + ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32) + ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>) + ; CHECK-NEXT: PseudoRET implicit $v0 + %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF + %2:_(s1) = G_CONSTANT i1 true + %3:_(s32) = G_CONSTANT i32 0 + %0:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32) + $v0 = COPY %0(<vscale x 2 x s1>) + PseudoRET implicit $v0 +... +--- +name: insertelement_nxv2i1_2 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; CHECK-LABEL: name: insertelement_nxv2i1_2 + ; CHECK: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32) + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32) + ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>) + ; CHECK-NEXT: PseudoRET implicit $v0 + %2:_(s32) = COPY $x10 + %0:_(s1) = G_TRUNC %2(s32) + %1:_(s32) = COPY $x11 + %4:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF + %3:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT %4, %0(s1), %1(s32) + $v0 = COPY %3(<vscale x 2 x s1>) + PseudoRET implicit $v0 +... +--- +name: insertelement_nxv4i1_0 +legalized: false +tracksRegLiveness: true +body: | + bb.0.entry: + ; CHECK-LABEL: name: insertelement_nxv4i1_0 + ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 + ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32) + ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>) + ; CHECK-NEXT: PseudoRET implicit $v0 + %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF + %2:_(s1) = G_CONSTANT i1 false + %3:_(s32) = G_CONSTANT i32 2 + %0:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32) + $v0 = COPY %0(<vscale x 4 x s1>) + PseudoRET implicit $v0 +... 
+---
+name: insertelement_nxv4i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32)
+    $v0 = COPY %0(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv4i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i1_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(s32) = COPY $x10
+    %0:_(s1) = G_TRUNC %1(s32)
+    %3:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %3, %0(s1), %4(s32)
+    $v0 = COPY %2(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv8i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i1_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 false
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32)
+    $v0 = COPY %0(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv8i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32)
+    $v0 = COPY %0(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv8i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv8i1_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %2:_(s32) = COPY $x10
+    %0:_(s1) = G_TRUNC %2(s32)
+    %1:_(s32) = COPY $x11
+    %4:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %3:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT %4, %0(s1), %1(s32)
+    $v0 = COPY %3(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv16i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i1_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 false
+    %3:_(s32) = G_CONSTANT i32 15
+    %0:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32)
+    $v0 = COPY %0(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv16i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s32)
+    $v0 = COPY %0(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv16i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv16i1_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %2:_(s32) = COPY $x10
+    %0:_(s1) = G_TRUNC %2(s32)
+    %1:_(s32) = COPY $x11
+    %4:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    %3:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT %4, %0(s1), %1(s32)
+    $v0 = COPY %3(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv4i1_3
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v0, $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i1_3
+    ; CHECK: liveins: $v0, $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s1), [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(<vscale x 4 x s1>) = COPY $v0
+    %2:_(s32) = COPY $x10
+    %1:_(s1) = G_TRUNC %2(s32)
+    %4:_(s32) = G_CONSTANT i32 0
+    %3:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %0, %1(s1), %4(s32)
+    $v0 = COPY %3(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv1i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv1i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s32)
+    %3:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s32)
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv2i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s32)
+    %3:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s32)
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s32)
+    %3:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s32)
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv8i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s32)
+    %3:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s32)
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv16i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv16i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s32)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv16i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12
+
+    ; CHECK-LABEL: name: insertelement_nxv16i8_2
+    ; CHECK: liveins: $x10, $x11, $x12
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[COPY1]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %2:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $x11
+    %4:_(s32) = COPY $x12
+    %1:_(s64) = G_MERGE_VALUES %3(s32), %4(s32)
+    %6:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    %7:_(s32) = G_TRUNC %1(s64)
+    %5:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT %6, %0(s8), %7(s32)
+    $v8m2 = COPY %5(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i8_3
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i8_3
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s8), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %2:_(s32) = COPY $x10
+    %1:_(s8) = G_TRUNC %2(s32)
+    %4:_(s32) = G_CONSTANT i32 0
+    %3:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %0, %1(s8), %4(s32)
+    $v8 = COPY %3(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv1i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s32)
+    %3:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s32)
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s32) = G_CONSTANT i32 1
+    %0:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv2i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s32)
+    %3:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s32)
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s32)
+    %3:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s32)
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv8i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s32)
+    %3:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s32)
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv16i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv16i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s32)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv16i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv16i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s32)
+    %3:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    %4:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s32)
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s16), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %2:_(s32) = COPY $x10
+    %1:_(s16) = G_TRUNC %2(s32)
+    %4:_(s32) = G_CONSTANT i32 0
+    %3:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %0, %1(s16), %4(s32)
+    $v8 = COPY %3(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %2(s32)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv1i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(s32) = COPY $x10
+    %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT %2, %0(s32), %3(s32)
+    $v8 = COPY %1(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %2(s32)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv2i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(s32) = COPY $x10
+    %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %2, %0(s32), %3(s32)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %2(s32)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s32)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(s32) = COPY $x10
+    %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %2, %0(s32), %3(s32)
+    $v8m2 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %2(s32)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv8i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s32)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv8i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv8i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(s32) = COPY $x10
+    %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT %2, %0(s32), %3(s32)
+    $v8m4 = COPY %1(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv16i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %2(s32)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv16i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s32)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv16i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv16i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(s32) = COPY $x10
+    %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT %2, %0(s32), %3(s32)
+    $v8m8 = COPY %1(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: insertelement_nxv4i32
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[COPY1]](s32), [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(s32) = COPY $x10
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %0, %1(s32), %3(s32)
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv1i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C1]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv1i64_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = COPY $x10
+    %2:_(s32) = COPY $x11
+    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+    %4:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    %5:_(s32) = G_CONSTANT i32 0
+    %3:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT %4, %0(s64), %5(s32)
+    $v8 = COPY %3(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv2i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C1]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv2i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv2i64_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s32) = COPY $x10
+    %2:_(s32) = COPY $x11
+    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+    %4:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    %5:_(s32) = G_CONSTANT i32 0
+    %3:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT %4, %0(s64), %5(s32)
+    $v8m2 = COPY %3(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv4i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C1]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv4i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv4i64_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s32) = COPY $x10
+    %2:_(s32) = COPY $x11
+    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+    %4:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    %5:_(s32) = G_CONSTANT i32 0
+    %3:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %4, %0(s64), %5(s32)
+    $v8m4 = COPY %3(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv8i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv8i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C1]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s32)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv8i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv8i64_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s32) = COPY $x10
+    %2:_(s32) = COPY $x11
+    %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+    %4:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %5:_(s32) = G_CONSTANT i32 0
+    %3:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %4, %0(s64), %5(s32)
+    $v8m8 = COPY %3(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $v8m4
+
+    ; CHECK-LABEL: name: insertelement_nxv4i64
+    ; CHECK: liveins: $x10, $x11, $v8m4
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[COPY]], [[MV]](s64), [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 4 x s64>) = COPY $v8m4
+    %2:_(s32) = COPY $x10
+    %3:_(s32) = COPY $x11
+    %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %5:_(s32) = G_CONSTANT i32 0
+    %4:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %0, %1(s64), %5(s32)
+    $v8m4 = COPY %4(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv64.mir
new file mode 100644
index 0000000..4c33ddc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv64.mir
@@ -0,0 +1,1731 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: insertelement_nxv1i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i1_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 false
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 1 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv1i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 1 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv1i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv1i1_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[AND]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %2:_(s64) = COPY $x10
+    %0:_(s1) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %5:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+    %6:_(s64) = G_ZEXT %1(s32)
+    %4:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT %5, %0(s1), %6(s64)
+    $v0 = COPY %4(<vscale x 1 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv2i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i1_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 false
+    %3:_(s64) = G_CONSTANT i64 1
+    %0:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 2 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv2i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 2 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv2i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv2i1_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[AND]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %2:_(s64) = COPY $x10
+    %0:_(s1) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %5:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+    %6:_(s64) = G_ZEXT %1(s32)
+    %4:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT %5, %0(s1), %6(s64)
+    $v0 = COPY %4(<vscale x 2 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv4i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i1_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 false
+    %3:_(s64) = G_CONSTANT i64 2
+    %0:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv4i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv4i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i1_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(s64) = COPY $x10
+    %0:_(s1) = G_TRUNC %1(s64)
+    %3:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %3, %0(s1), %4(s64)
+    $v0 = COPY %2(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv8i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i1_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 false
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv8i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv8i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv8i1_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[AND]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %2:_(s64) = COPY $x10
+    %0:_(s1) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %5:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %6:_(s64) = G_ZEXT %1(s32)
+    %4:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT %5, %0(s1), %6(s64)
+    $v0 = COPY %4(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv16i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i1_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 false
+    %3:_(s64) = G_CONSTANT i64 15
+    %0:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv16i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i1_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C1]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    %2:_(s1) = G_CONSTANT i1 true
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT %1, %2(s1), %3(s64)
+    $v0 = COPY %0(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv16i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv16i1_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[AND]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %2:_(s64) = COPY $x10
+    %0:_(s1) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %5:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+    %6:_(s64) = G_ZEXT %1(s32)
+    %4:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT %5, %0(s1), %6(s64)
+    $v0 = COPY %4(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv4i1_3
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v0, $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i1_3
+    ; CHECK: liveins: $v0, $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s1), [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(<vscale x 4 x s1>) = COPY $v0
+    %2:_(s64) = COPY $x10
+    %1:_(s1) = G_TRUNC %2(s64)
+    %4:_(s64) = G_CONSTANT i64 0
+    %3:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT %0, %1(s1), %4(s64)
+    $v0 = COPY %3(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+...
+---
+name: insertelement_nxv1i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv1i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s64)
+    %3:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s64)
+    $v8 = COPY %2(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv2i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s64)
+    %3:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s64)
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s64)
+    %3:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s64)
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv8i8_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %1(s64)
+    %3:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT %3, %0(s8), %4(s64)
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv16i8_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i8_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv16i8_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i8_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    %2:_(s8) = G_CONSTANT i8 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT %1, %2(s8), %3(s64)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv16i8_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: insertelement_nxv16i8_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[COPY1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %2:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s64)
+    %1:_(s64) = COPY $x11
+    %4:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+    %3:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT %4, %0(s8), %1(s64)
+    $v8m2 = COPY %3(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i8_3
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i8_3
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s8), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s8>) = COPY $v8
+    %2:_(s64) = COPY $x10
+    %1:_(s8) = G_TRUNC %2(s64)
+    %4:_(s64) = G_CONSTANT i64 0
+    %3:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %0, %1(s8), %4(s64)
+    $v8 = COPY %3(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv1i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s64)
+    %3:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s64)
+    $v8 = COPY %2(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s64) = G_CONSTANT i64 1
+    %0:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv2i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s64)
+    %3:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s64)
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s64)
+    %3:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s64)
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv8i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s64)
+    %3:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s64)
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv16i16_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i16_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv16i16_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i16_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C1]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    %2:_(s16) = G_CONSTANT i16 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT %1, %2(s16), %3(s64)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv16i16_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv16i16_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %1(s64)
+    %3:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT %3, %0(s16), %4(s64)
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s16), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = COPY $v8
+    %2:_(s64) = COPY $x10
+    %1:_(s16) = G_TRUNC %2(s64)
+    %4:_(s64) = G_CONSTANT i64 0
+    %3:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %0, %1(s16), %4(s64)
+    $v8 = COPY %3(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv1i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %1(s64)
+    %3:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT %3, %0(s32), %4(s64)
+    $v8 = COPY %2(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv2i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %1(s64)
+    %3:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %3, %0(s32), %4(s64)
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %1(s64)
+    %3:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %3, %0(s32), %4(s64)
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv8i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv8i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv8i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %1(s64)
+    %3:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT %3, %0(s32), %4(s64)
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv16i32_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i32_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv16i32_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv16i32_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT %1, %2(s32), %3(s64)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv16i32_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv16i32_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %1(s64)
+    %3:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+    %4:_(s64) = G_CONSTANT i64 0
+    %2:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT %3, %0(s32), %4(s64)
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: insertelement_nxv4i32
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s32), [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %2:_(s64) = COPY $x10
+    %1:_(s32) = G_TRUNC %2(s64)
+    %4:_(s64) = G_CONSTANT i64 0
+    %3:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %0, %1(s32), %4(s64)
+    $v8m2 = COPY %3(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv1i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv1i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv1i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv1i64_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(s64) = COPY $x10
+    %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+    %3:_(s64) = G_CONSTANT i64 0
+    %1:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT %2, %0(s64), %3(s64)
+    $v8 = COPY %1(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %2(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv2i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv2i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv2i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv2i64_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(s64) = COPY $x10
+    %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+    %3:_(s64) = G_CONSTANT i64 0
+    %1:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT %2, %0(s64), %3(s64)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %2(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv4i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv4i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv4i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv4i64_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(s64) = COPY $x10
+    %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+    %3:_(s64) = G_CONSTANT i64 0
+    %1:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %2, %0(s64), %3(s64)
+    $v8m4 = COPY %1(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv8i64_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i64_0
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %2(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv8i64_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    ; CHECK-LABEL: name: insertelement_nxv8i64_1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %2:_(s64) = G_CONSTANT i64 -1
+    %3:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %1, %2(s64), %3(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv8i64_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: insertelement_nxv8i64_2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(s64) = COPY $x10
+    %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %3:_(s64) = G_CONSTANT i64 0
+    %1:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %2, %0(s64), %3(s64)
+    $v8m8 = COPY %1(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll
index 2a46a59..4f036d3 100644
--- a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll
+++ b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll
@@ -221,8 +221,8 @@ define i64 @test12(i64 %0) #0 {
 ;
 ; RV64-LABEL: test12:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a0, a0, -16
-; RV64-NEXT:    addi a0, a0, 13
+; RV64-NEXT:    addi a0, a0, -16
+; RV64-NEXT:    addiw a0, a0, 13
 ; RV64-NEXT:    seqz a0, a0
 ; RV64-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index f3529b1..22c2d81 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -80,6 +80,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+xwchc %s -o - | FileCheck --check-prefix=RV32XWCHC %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
+; RUN: llc -mtriple=riscv32 -mattr=+zaamo,+zalrsc %s -o - | FileCheck --check-prefixes=CHECK,RV32COMBINEINTOA %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zca %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCA %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zcb %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCB %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zcd %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCD %s
@@ -227,6 +228,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+ztso %s -o - | FileCheck --check-prefixes=CHECK,RV64ZTSO %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zaamo %s -o - | FileCheck --check-prefix=RV64ZAAMO %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zalrsc %s -o - | FileCheck --check-prefix=RV64ZALRSC %s
+; RUN: llc -mtriple=riscv64 -mattr=+zaamo,+zalrsc %s -o - | FileCheck --check-prefixes=CHECK,RV64COMBINEINTOA %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zca %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCA %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zcb %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCB %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zcd %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCD %s
@@ -392,6 +394,7 @@
 ; RV32XWCHC: .attribute 5, "rv32i2p1_zca1p0_xwchc2p2"
 ; RV32ZAAMO: .attribute 5, "rv32i2p1_zaamo1p0"
 ; RV32ZALRSC: .attribute 5, "rv32i2p1_zalrsc1p0"
+; RV32COMBINEINTOA: .attribute 5, "rv32i2p1_a2p1_zaamo1p0_zalrsc1p0"
 ; RV32ZCA: .attribute 5, "rv32i2p1_zca1p0"
 ; RV32ZCB: .attribute 5, "rv32i2p1_zca1p0_zcb1p0"
 ; RV32ZCD: .attribute 5, "rv32i2p1_f2p2_d2p2_zicsr2p0_zca1p0_zcd1p0"
@@ -537,6 +540,7 @@
 ; RV64ZTSO: .attribute 5, "rv64i2p1_ztso1p0"
 ; RV64ZAAMO: .attribute 5, "rv64i2p1_zaamo1p0"
 ; RV64ZALRSC: .attribute 5, "rv64i2p1_zalrsc1p0"
+; RV64COMBINEINTOA: .attribute 5, "rv64i2p1_a2p1_zaamo1p0_zalrsc1p0"
 ; RV64ZCA: .attribute 5, "rv64i2p1_zca1p0"
 ; RV64ZCB: .attribute 5, "rv64i2p1_zca1p0_zcb1p0"
 ; RV64ZCD: .attribute 5, "rv64i2p1_f2p2_d2p2_zicsr2p0_zca1p0_zcd1p0"
diff --git a/llvm/test/CodeGen/RISCV/branch-rel.mir b/llvm/test/CodeGen/RISCV/branch-rel.mir
new file mode 100644
index 0000000..1ed5f57
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/branch-rel.mir
@@ -0,0 +1,39 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc %s -mtriple=riscv64 -run-pass=branch-relaxation -o - -verify-machineinstrs | FileCheck %s
+
+--- |
+  define void @foo() {
+    ret void
+  }
+...
+---
+name: foo
+tracksRegLiveness: true
+body: |
+  ; CHECK-LABEL: name: foo
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   PseudoBR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   INLINEASM &".space 4096", 1 /* sideeffect attdialect */
+  ; CHECK-NEXT:   BGE $x1, $x0, %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   PseudoRET
+  bb.0:
+    liveins: $x1
+    BNE $x1, $x0, %bb.3
+    PseudoBR %bb.3
+  bb.1:
+    liveins: $x1
+    INLINEASM &".space 4096", 1
+    BGE $x1, $x0, %bb.3
+  bb.3:
+    PseudoRET
+## NOTE: These prefixes are unused and the list is autogenerated.
Do not add tests below this line: diff --git a/llvm/test/CodeGen/RISCV/i64-icmp.ll b/llvm/test/CodeGen/RISCV/i64-icmp.ll index 88d989d..2742b9a 100644 --- a/llvm/test/CodeGen/RISCV/i64-icmp.ll +++ b/llvm/test/CodeGen/RISCV/i64-icmp.ll @@ -708,8 +708,7 @@ define i64 @icmp_sle_constant_neg_2050(i64 %a) nounwind { define i64 @icmp_eq_zext_inreg_small_constant(i64 %a) nounwind { ; RV64I-LABEL: icmp_eq_zext_inreg_small_constant: ; RV64I: # %bb.0: -; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: addi a0, a0, -123 +; RV64I-NEXT: addiw a0, a0, -123 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: ret %1 = and i64 %a, 4294967295 @@ -748,8 +747,7 @@ define i64 @icmp_ne_zext_inreg_small_constant(i64 %a) nounwind { define i64 @icmp_ne_zext_inreg_large_constant(i64 %a) nounwind { ; RV64I-LABEL: icmp_ne_zext_inreg_large_constant: ; RV64I: # %bb.0: -; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: addi a0, a0, 2 +; RV64I-NEXT: addiw a0, a0, 2 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: ret %1 = and i64 %a, 4294967295 diff --git a/llvm/test/CodeGen/RISCV/idiv_large.ll b/llvm/test/CodeGen/RISCV/idiv_large.ll index 9937627..d7b00f6 100644 --- a/llvm/test/CodeGen/RISCV/idiv_large.ll +++ b/llvm/test/CodeGen/RISCV/idiv_large.ll @@ -1,16 +1,2315 @@ -; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 < %s | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=riscv32 < %s | FileCheck %s --check-prefix=RV32 +; RUN: llc -mtriple=riscv64 < %s | FileCheck %s --check-prefix=RV64 + +define i64 @udiv_i64(i64 %x, i64 %y) nounwind { +; RV32-LABEL: udiv_i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: call __udivdi3 +; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: udiv_i64: +; RV64: # %bb.0: +; RV64-NEXT: tail __udivdi3 + %res = udiv i64 %x, %y + ret i64 %res +} + +define i65 @udiv_i65(i65 %x, i65 %y) nounwind { +; RV32-LABEL: udiv_i65: +; RV32: # %bb.0: # %_udiv-special-cases +; RV32-NEXT: lw a3, 0(a2) +; RV32-NEXT: lw a4, 4(a2) +; RV32-NEXT: lw t1, 8(a2) +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: lui a5, 209715 +; RV32-NEXT: lui a6, 61681 +; RV32-NEXT: addi t0, a2, 1365 +; RV32-NEXT: addi a7, a5, 819 +; RV32-NEXT: addi a6, a6, -241 +; RV32-NEXT: srli a2, a4, 1 +; RV32-NEXT: slli a5, t1, 31 +; RV32-NEXT: slli t3, a4, 31 +; RV32-NEXT: or t2, a5, a2 +; RV32-NEXT: srli a2, a3, 1 +; RV32-NEXT: or t4, a2, t3 +; RV32-NEXT: bnez t2, .LBB1_2 +; RV32-NEXT: # %bb.1: # %_udiv-special-cases +; RV32-NEXT: srli a2, t4, 1 +; RV32-NEXT: or a2, t4, a2 +; RV32-NEXT: srli a5, a2, 2 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 4 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 8 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 16 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: not a2, a2 +; RV32-NEXT: srli a5, a2, 1 +; RV32-NEXT: and a5, a5, t0 +; RV32-NEXT: sub a2, a2, a5 +; RV32-NEXT: and a5, a2, a7 +; RV32-NEXT: srli a2, a2, 2 +; RV32-NEXT: and a2, a2, a7 +; RV32-NEXT: add a2, a5, a2 +; RV32-NEXT: srli a5, a2, 4 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: and a2, a2, a6 +; RV32-NEXT: slli a5, a2, 8 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: slli a5, a2, 16 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: srli a2, a2, 24 +; RV32-NEXT: addi t3, a2, 32 +; RV32-NEXT: j .LBB1_3 +; RV32-NEXT: .LBB1_2: +; RV32-NEXT: srli a2, t2, 1 +; RV32-NEXT: or a2, t2, a2 +; RV32-NEXT: srli a5, a2, 2 +; RV32-NEXT: or a2, a2, a5 +; 
RV32-NEXT: srli a5, a2, 4 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 8 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 16 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: not a2, a2 +; RV32-NEXT: srli a5, a2, 1 +; RV32-NEXT: and a5, a5, t0 +; RV32-NEXT: sub a2, a2, a5 +; RV32-NEXT: and a5, a2, a7 +; RV32-NEXT: srli a2, a2, 2 +; RV32-NEXT: and a2, a2, a7 +; RV32-NEXT: add a2, a5, a2 +; RV32-NEXT: srli a5, a2, 4 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: and a2, a2, a6 +; RV32-NEXT: slli a5, a2, 8 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: slli a5, a2, 16 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: srli t3, a2, 24 +; RV32-NEXT: .LBB1_3: # %_udiv-special-cases +; RV32-NEXT: addi sp, sp, -96 +; RV32-NEXT: sw s0, 92(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s1, 88(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s2, 84(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s3, 80(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s4, 76(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s5, 72(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s6, 68(sp) # 4-byte Folded Spill +; RV32-NEXT: slli a2, a3, 31 +; RV32-NEXT: li t5, 64 +; RV32-NEXT: bnez a2, .LBB1_5 +; RV32-NEXT: # %bb.4: # %_udiv-special-cases +; RV32-NEXT: li s0, 64 +; RV32-NEXT: j .LBB1_6 +; RV32-NEXT: .LBB1_5: +; RV32-NEXT: srli a5, a2, 1 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 2 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 4 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 8 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: srli a5, a2, 16 +; RV32-NEXT: or a2, a2, a5 +; RV32-NEXT: not a2, a2 +; RV32-NEXT: srli a5, a2, 1 +; RV32-NEXT: and a5, a5, t0 +; RV32-NEXT: sub a2, a2, a5 +; RV32-NEXT: and a5, a2, a7 +; RV32-NEXT: srli a2, a2, 2 +; RV32-NEXT: and a2, a2, a7 +; RV32-NEXT: add a2, a5, a2 +; RV32-NEXT: srli a5, a2, 4 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: and a2, a2, a6 +; RV32-NEXT: slli a5, a2, 8 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: slli a5, a2, 16 +; RV32-NEXT: add a2, a2, a5 +; RV32-NEXT: srli s0, a2, 24 +; RV32-NEXT: .LBB1_6: # %_udiv-special-cases +; RV32-NEXT: lw a5, 0(a1) +; RV32-NEXT: lw a2, 4(a1) +; RV32-NEXT: lw s2, 8(a1) +; RV32-NEXT: or a1, t4, t2 +; RV32-NEXT: addi s1, s0, 64 +; RV32-NEXT: bnez a1, .LBB1_8 +; RV32-NEXT: # %bb.7: # %_udiv-special-cases +; RV32-NEXT: mv t3, s1 +; RV32-NEXT: .LBB1_8: # %_udiv-special-cases +; RV32-NEXT: snez s4, a1 +; RV32-NEXT: srli a1, a2, 1 +; RV32-NEXT: slli t2, s2, 31 +; RV32-NEXT: slli t4, a2, 31 +; RV32-NEXT: or a1, t2, a1 +; RV32-NEXT: srli t2, a5, 1 +; RV32-NEXT: or t6, t2, t4 +; RV32-NEXT: bnez a1, .LBB1_10 +; RV32-NEXT: # %bb.9: # %_udiv-special-cases +; RV32-NEXT: srli t2, t6, 1 +; RV32-NEXT: or t2, t6, t2 +; RV32-NEXT: srli t4, t2, 2 +; RV32-NEXT: or t2, t2, t4 +; RV32-NEXT: srli t4, t2, 4 +; RV32-NEXT: or t2, t2, t4 +; RV32-NEXT: srli t4, t2, 8 +; RV32-NEXT: or t2, t2, t4 +; RV32-NEXT: srli t4, t2, 16 +; RV32-NEXT: or t2, t2, t4 +; RV32-NEXT: not t2, t2 +; RV32-NEXT: srli t4, t2, 1 +; RV32-NEXT: and t4, t4, t0 +; RV32-NEXT: sub t2, t2, t4 +; RV32-NEXT: and t4, t2, a7 +; RV32-NEXT: srli t2, t2, 2 +; RV32-NEXT: and t2, t2, a7 +; RV32-NEXT: add t2, t4, t2 +; RV32-NEXT: srli t4, t2, 4 +; RV32-NEXT: add t2, t2, t4 +; RV32-NEXT: and t2, t2, a6 +; RV32-NEXT: slli t4, t2, 8 +; RV32-NEXT: add t2, t2, t4 +; RV32-NEXT: slli t4, t2, 16 +; RV32-NEXT: add t2, t2, t4 +; RV32-NEXT: srli t2, t2, 24 +; RV32-NEXT: addi s3, t2, 32 +; RV32-NEXT: j .LBB1_11 +; RV32-NEXT: .LBB1_10: +; RV32-NEXT: srli t2, a1, 1 +; RV32-NEXT: or t2, a1, t2 +; RV32-NEXT: srli t4, t2, 2 +; RV32-NEXT: or t2, 
t2, t4 +; RV32-NEXT: srli t4, t2, 4 +; RV32-NEXT: or t2, t2, t4 +; RV32-NEXT: srli t4, t2, 8 +; RV32-NEXT: or t2, t2, t4 +; RV32-NEXT: srli t4, t2, 16 +; RV32-NEXT: or t2, t2, t4 +; RV32-NEXT: not t2, t2 +; RV32-NEXT: srli t4, t2, 1 +; RV32-NEXT: and t4, t4, t0 +; RV32-NEXT: sub t2, t2, t4 +; RV32-NEXT: and t4, t2, a7 +; RV32-NEXT: srli t2, t2, 2 +; RV32-NEXT: and t2, t2, a7 +; RV32-NEXT: add t2, t4, t2 +; RV32-NEXT: srli t4, t2, 4 +; RV32-NEXT: add t2, t2, t4 +; RV32-NEXT: and t2, t2, a6 +; RV32-NEXT: slli t4, t2, 8 +; RV32-NEXT: add t2, t2, t4 +; RV32-NEXT: slli t4, t2, 16 +; RV32-NEXT: add t2, t2, t4 +; RV32-NEXT: srli s3, t2, 24 +; RV32-NEXT: .LBB1_11: # %_udiv-special-cases +; RV32-NEXT: andi t4, s2, 1 +; RV32-NEXT: andi t1, t1, 1 +; RV32-NEXT: or t2, a3, a4 +; RV32-NEXT: or s2, a5, a2 +; RV32-NEXT: sltu s0, s1, s0 +; RV32-NEXT: slli s1, a5, 31 +; RV32-NEXT: addi s4, s4, -1 +; RV32-NEXT: beqz s1, .LBB1_13 +; RV32-NEXT: # %bb.12: +; RV32-NEXT: srli t5, s1, 1 +; RV32-NEXT: or t5, s1, t5 +; RV32-NEXT: srli s1, t5, 2 +; RV32-NEXT: or t5, t5, s1 +; RV32-NEXT: srli s1, t5, 4 +; RV32-NEXT: or t5, t5, s1 +; RV32-NEXT: srli s1, t5, 8 +; RV32-NEXT: or t5, t5, s1 +; RV32-NEXT: srli s1, t5, 16 +; RV32-NEXT: or t5, t5, s1 +; RV32-NEXT: not t5, t5 +; RV32-NEXT: srli s1, t5, 1 +; RV32-NEXT: and t0, s1, t0 +; RV32-NEXT: sub t0, t5, t0 +; RV32-NEXT: and t5, t0, a7 +; RV32-NEXT: srli t0, t0, 2 +; RV32-NEXT: and a7, t0, a7 +; RV32-NEXT: add a7, t5, a7 +; RV32-NEXT: srli t0, a7, 4 +; RV32-NEXT: add a7, a7, t0 +; RV32-NEXT: and a6, a7, a6 +; RV32-NEXT: slli a7, a6, 8 +; RV32-NEXT: add a6, a6, a7 +; RV32-NEXT: slli a7, a6, 16 +; RV32-NEXT: add a6, a6, a7 +; RV32-NEXT: srli t5, a6, 24 +; RV32-NEXT: .LBB1_13: # %_udiv-special-cases +; RV32-NEXT: or t0, t2, t1 +; RV32-NEXT: or a6, s2, t4 +; RV32-NEXT: and a7, s4, s0 +; RV32-NEXT: or t6, t6, a1 +; RV32-NEXT: addi s0, t5, 64 +; RV32-NEXT: bnez t6, .LBB1_15 +; RV32-NEXT: # %bb.14: # %_udiv-special-cases +; RV32-NEXT: mv s3, s0 +; RV32-NEXT: .LBB1_15: # %_udiv-special-cases +; RV32-NEXT: seqz a1, t0 +; RV32-NEXT: sltu t0, s0, t5 +; RV32-NEXT: snez t5, t6 +; RV32-NEXT: addi t5, t5, -1 +; RV32-NEXT: and t0, t5, t0 +; RV32-NEXT: sltu t5, t3, s3 +; RV32-NEXT: seqz a6, a6 +; RV32-NEXT: mv t6, t5 +; RV32-NEXT: beq a7, t0, .LBB1_17 +; RV32-NEXT: # %bb.16: # %_udiv-special-cases +; RV32-NEXT: sltu t6, a7, t0 +; RV32-NEXT: .LBB1_17: # %_udiv-special-cases +; RV32-NEXT: or a1, a1, a6 +; RV32-NEXT: andi a6, t6, 1 +; RV32-NEXT: sub a7, a7, t0 +; RV32-NEXT: sub t5, a7, t5 +; RV32-NEXT: sub a7, t3, s3 +; RV32-NEXT: beqz a6, .LBB1_19 +; RV32-NEXT: # %bb.18: # %_udiv-special-cases +; RV32-NEXT: mv t0, a6 +; RV32-NEXT: j .LBB1_20 +; RV32-NEXT: .LBB1_19: +; RV32-NEXT: sltiu t0, a7, 65 +; RV32-NEXT: xori t0, t0, 1 +; RV32-NEXT: snez t3, t5 +; RV32-NEXT: or t0, t0, t3 +; RV32-NEXT: .LBB1_20: # %_udiv-special-cases +; RV32-NEXT: or t6, a1, t0 +; RV32-NEXT: addi a1, t6, -1 +; RV32-NEXT: and t3, t4, a1 +; RV32-NEXT: and t0, a1, a2 +; RV32-NEXT: and a1, a1, a5 +; RV32-NEXT: bnez t6, .LBB1_30 +; RV32-NEXT: # %bb.21: # %_udiv-special-cases +; RV32-NEXT: xori t6, a7, 64 +; RV32-NEXT: or t6, t6, a6 +; RV32-NEXT: or t6, t6, t5 +; RV32-NEXT: beqz t6, .LBB1_30 +; RV32-NEXT: # %bb.22: # %udiv-bb1 +; RV32-NEXT: addi a1, a7, 1 +; RV32-NEXT: sw zero, 32(sp) +; RV32-NEXT: sw zero, 36(sp) +; RV32-NEXT: sw zero, 40(sp) +; RV32-NEXT: sw zero, 44(sp) +; RV32-NEXT: sw a5, 48(sp) +; RV32-NEXT: sw a2, 52(sp) +; RV32-NEXT: sw t4, 56(sp) +; RV32-NEXT: li t0, 64 +; RV32-NEXT: addi t3, sp, 48 +; RV32-NEXT: 
neg s1, a7 +; RV32-NEXT: seqz t6, a1 +; RV32-NEXT: sub a7, t0, a7 +; RV32-NEXT: add t5, t5, t6 +; RV32-NEXT: andi t0, a7, 31 +; RV32-NEXT: srli a7, a7, 3 +; RV32-NEXT: or t6, a1, t5 +; RV32-NEXT: xori s2, t0, 31 +; RV32-NEXT: andi a7, a7, 12 +; RV32-NEXT: seqz t0, t6 +; RV32-NEXT: sub s3, t3, a7 +; RV32-NEXT: add a6, a6, t0 +; RV32-NEXT: lw t3, 0(s3) +; RV32-NEXT: lw s4, 4(s3) +; RV32-NEXT: andi a7, a6, 1 +; RV32-NEXT: or t6, t6, a7 +; RV32-NEXT: srli a6, t3, 1 +; RV32-NEXT: sll t0, s4, s1 +; RV32-NEXT: srl a6, a6, s2 +; RV32-NEXT: or t0, t0, a6 +; RV32-NEXT: sll a6, t3, s1 +; RV32-NEXT: li t3, 0 +; RV32-NEXT: beqz t6, .LBB1_28 +; RV32-NEXT: # %bb.23: # %udiv-preheader +; RV32-NEXT: li t6, 0 +; RV32-NEXT: li s0, 0 +; RV32-NEXT: srli s4, s4, 1 +; RV32-NEXT: lw s3, 8(s3) +; RV32-NEXT: sw zero, 16(sp) +; RV32-NEXT: sw zero, 20(sp) +; RV32-NEXT: sw zero, 24(sp) +; RV32-NEXT: sw zero, 28(sp) +; RV32-NEXT: sw a5, 0(sp) +; RV32-NEXT: sw a2, 4(sp) +; RV32-NEXT: sw t4, 8(sp) +; RV32-NEXT: sw zero, 12(sp) +; RV32-NEXT: srli a2, a1, 3 +; RV32-NEXT: srl a5, s4, s2 +; RV32-NEXT: mv t4, sp +; RV32-NEXT: snez t2, t2 +; RV32-NEXT: andi a2, a2, 12 +; RV32-NEXT: add t1, t1, t2 +; RV32-NEXT: add a2, t4, a2 +; RV32-NEXT: lw t2, 0(a2) +; RV32-NEXT: lw t4, 4(a2) +; RV32-NEXT: lw a2, 8(a2) +; RV32-NEXT: sll s1, s3, s1 +; RV32-NEXT: andi s2, a1, 31 +; RV32-NEXT: xori s2, s2, 31 +; RV32-NEXT: or s3, s1, a5 +; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: slli a5, t4, 1 +; RV32-NEXT: sll a2, a2, s2 +; RV32-NEXT: sll s2, a5, s2 +; RV32-NEXT: srl s1, t4, a1 +; RV32-NEXT: or s1, s1, a2 +; RV32-NEXT: seqz a2, a3 +; RV32-NEXT: sub a2, a4, a2 +; RV32-NEXT: addi a5, t1, 1 +; RV32-NEXT: andi a5, a5, 1 +; RV32-NEXT: andi s3, s3, 1 +; RV32-NEXT: srl t1, t2, a1 +; RV32-NEXT: or s2, t1, s2 +; RV32-NEXT: addi t1, a3, -1 +; RV32-NEXT: j .LBB1_26 +; RV32-NEXT: .LBB1_24: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB1_26 Depth=1 +; RV32-NEXT: sltu t2, a2, s4 +; RV32-NEXT: .LBB1_25: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB1_26 Depth=1 +; RV32-NEXT: srli s1, s1, 31 +; RV32-NEXT: sub t4, a5, s1 +; RV32-NEXT: sub t2, t4, t2 +; RV32-NEXT: slli t2, t2, 31 +; RV32-NEXT: srai s1, t2, 31 +; RV32-NEXT: and s3, s1, a4 +; RV32-NEXT: li t2, 0 +; RV32-NEXT: li t4, 0 +; RV32-NEXT: srli s5, a6, 31 +; RV32-NEXT: sub s4, s4, s3 +; RV32-NEXT: slli s3, t0, 1 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: srli t0, t0, 31 +; RV32-NEXT: slli a6, a6, 1 +; RV32-NEXT: or a6, t3, a6 +; RV32-NEXT: seqz t3, a1 +; RV32-NEXT: or s0, s0, t0 +; RV32-NEXT: or s5, a1, t5 +; RV32-NEXT: sub t5, t5, t3 +; RV32-NEXT: and s6, s1, a3 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: andi t3, s1, 1 +; RV32-NEXT: or t0, t6, s3 +; RV32-NEXT: sltu t6, s2, s6 +; RV32-NEXT: snez s5, s5 +; RV32-NEXT: andi s3, s0, 1 +; RV32-NEXT: sub s1, s4, t6 +; RV32-NEXT: add a7, a7, s5 +; RV32-NEXT: addi a7, a7, 1 +; RV32-NEXT: andi a7, a7, 1 +; RV32-NEXT: or t6, a1, t5 +; RV32-NEXT: or s4, t6, a7 +; RV32-NEXT: sub s2, s2, s6 +; RV32-NEXT: li t6, 0 +; RV32-NEXT: li s0, 0 +; RV32-NEXT: beqz s4, .LBB1_29 +; RV32-NEXT: .LBB1_26: # %udiv-do-while +; RV32-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32-NEXT: srli t2, s2, 31 +; RV32-NEXT: slli t4, s1, 1 +; RV32-NEXT: slli s2, s2, 1 +; RV32-NEXT: or s4, t4, t2 +; RV32-NEXT: andi t2, s3, 1 +; RV32-NEXT: or s2, s2, t2 +; RV32-NEXT: bne a2, s4, .LBB1_24 +; RV32-NEXT: # %bb.27: # in Loop: Header=BB1_26 Depth=1 +; RV32-NEXT: sltu t2, t1, s2 +; RV32-NEXT: j .LBB1_25 +; RV32-NEXT: .LBB1_28: +; RV32-NEXT: li t2, 0 +; RV32-NEXT: li t4, 0 +; RV32-NEXT: 
.LBB1_29: # %udiv-loop-exit +; RV32-NEXT: srli a2, a6, 31 +; RV32-NEXT: slli a3, t0, 1 +; RV32-NEXT: srli a4, t0, 31 +; RV32-NEXT: slli a6, a6, 1 +; RV32-NEXT: or a1, t3, a6 +; RV32-NEXT: or a2, t2, a2 +; RV32-NEXT: or a4, t4, a4 +; RV32-NEXT: or t0, a2, a3 +; RV32-NEXT: andi t3, a4, 1 +; RV32-NEXT: .LBB1_30: # %udiv-end +; RV32-NEXT: andi a2, t3, 1 +; RV32-NEXT: sw a1, 0(a0) +; RV32-NEXT: sw t0, 4(a0) +; RV32-NEXT: sb a2, 8(a0) +; RV32-NEXT: lw s0, 92(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s1, 88(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s2, 84(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s3, 80(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s4, 76(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s5, 72(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s6, 68(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 96 +; RV32-NEXT: ret +; +; RV64-LABEL: udiv_i65: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64-NEXT: andi a1, a1, 1 +; RV64-NEXT: andi a3, a3, 1 +; RV64-NEXT: call __udivti3 +; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %res = udiv i65 %x, %y + ret i65 %res +} define i128 @udiv_i128(i128 %x, i128 %y) nounwind { -; CHECK-LABEL: udiv_i128: -; CHECK: call __udivti3 +; RV32-LABEL: udiv_i128: +; RV32: # %bb.0: # %_udiv-special-cases +; RV32-NEXT: addi sp, sp, -160 +; RV32-NEXT: sw ra, 156(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 152(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s1, 148(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s2, 144(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s3, 140(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s4, 136(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s5, 132(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s6, 128(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s7, 124(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s8, 120(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s9, 116(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s10, 112(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s11, 108(sp) # 4-byte Folded Spill +; RV32-NEXT: mv s7, a0 +; RV32-NEXT: lw s8, 0(a2) +; RV32-NEXT: lw s9, 4(a2) +; RV32-NEXT: lw s11, 8(a2) +; RV32-NEXT: lw ra, 12(a2) +; RV32-NEXT: lui t4, 349525 +; RV32-NEXT: addi t4, t4, 1365 +; RV32-NEXT: lui t3, 209715 +; RV32-NEXT: addi t3, t3, 819 +; RV32-NEXT: lui t2, 61681 +; RV32-NEXT: addi t2, t2, -241 +; RV32-NEXT: bnez s9, .LBB2_2 +; RV32-NEXT: # %bb.1: # %_udiv-special-cases +; RV32-NEXT: srli a0, s8, 1 +; RV32-NEXT: or a0, s8, a0 +; RV32-NEXT: srli a3, a0, 2 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 8 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 16 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: not a0, a0 +; RV32-NEXT: srli a3, a0, 1 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: sub a0, a0, a3 +; RV32-NEXT: and a3, a0, t3 +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: and a0, a0, t3 +; RV32-NEXT: add a0, a3, a0 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: and a0, a0, t2 +; RV32-NEXT: slli a3, a0, 8 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: slli a3, a0, 16 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: srli a0, a0, 24 +; RV32-NEXT: addi t6, a0, 32 +; RV32-NEXT: j .LBB2_3 +; RV32-NEXT: .LBB2_2: +; RV32-NEXT: srli a0, s9, 1 +; RV32-NEXT: or a0, s9, a0 +; RV32-NEXT: srli a3, a0, 2 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 8 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 16 +; RV32-NEXT: or 
a0, a0, a3 +; RV32-NEXT: not a0, a0 +; RV32-NEXT: srli a3, a0, 1 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: sub a0, a0, a3 +; RV32-NEXT: and a3, a0, t3 +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: and a0, a0, t3 +; RV32-NEXT: add a0, a3, a0 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: and a0, a0, t2 +; RV32-NEXT: slli a3, a0, 8 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: slli a3, a0, 16 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: srli t6, a0, 24 +; RV32-NEXT: .LBB2_3: # %_udiv-special-cases +; RV32-NEXT: lw a6, 4(a1) +; RV32-NEXT: or s0, s11, ra +; RV32-NEXT: bnez ra, .LBB2_5 +; RV32-NEXT: # %bb.4: # %_udiv-special-cases +; RV32-NEXT: srli a0, s11, 1 +; RV32-NEXT: or a0, s11, a0 +; RV32-NEXT: srli a3, a0, 2 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 8 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 16 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: not a0, a0 +; RV32-NEXT: srli a3, a0, 1 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: sub a0, a0, a3 +; RV32-NEXT: and a3, a0, t3 +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: and a0, a0, t3 +; RV32-NEXT: add a0, a3, a0 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: and a0, a0, t2 +; RV32-NEXT: slli a3, a0, 8 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: slli a3, a0, 16 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: srli a0, a0, 24 +; RV32-NEXT: addi t5, a0, 32 +; RV32-NEXT: j .LBB2_6 +; RV32-NEXT: .LBB2_5: +; RV32-NEXT: srli a0, ra, 1 +; RV32-NEXT: or a0, ra, a0 +; RV32-NEXT: srli a3, a0, 2 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 8 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: srli a3, a0, 16 +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: not a0, a0 +; RV32-NEXT: srli a3, a0, 1 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: sub a0, a0, a3 +; RV32-NEXT: and a3, a0, t3 +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: and a0, a0, t3 +; RV32-NEXT: add a0, a3, a0 +; RV32-NEXT: srli a3, a0, 4 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: and a0, a0, t2 +; RV32-NEXT: slli a3, a0, 8 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: slli a3, a0, 16 +; RV32-NEXT: add a0, a0, a3 +; RV32-NEXT: srli t5, a0, 24 +; RV32-NEXT: .LBB2_6: # %_udiv-special-cases +; RV32-NEXT: lw a7, 12(a1) +; RV32-NEXT: addi a0, t6, 64 +; RV32-NEXT: bnez s0, .LBB2_8 +; RV32-NEXT: # %bb.7: # %_udiv-special-cases +; RV32-NEXT: mv t5, a0 +; RV32-NEXT: .LBB2_8: # %_udiv-special-cases +; RV32-NEXT: lw t1, 0(a1) +; RV32-NEXT: lw t0, 8(a1) +; RV32-NEXT: snez s3, s0 +; RV32-NEXT: bnez a6, .LBB2_10 +; RV32-NEXT: # %bb.9: # %_udiv-special-cases +; RV32-NEXT: srli a1, t1, 1 +; RV32-NEXT: or a1, t1, a1 +; RV32-NEXT: srli a3, a1, 2 +; RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: srli a3, a1, 4 +; RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: srli a3, a1, 8 +; RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: srli a3, a1, 16 +; RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: not a1, a1 +; RV32-NEXT: srli a3, a1, 1 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: sub a1, a1, a3 +; RV32-NEXT: and a3, a1, t3 +; RV32-NEXT: srli a1, a1, 2 +; RV32-NEXT: and a1, a1, t3 +; RV32-NEXT: add a1, a3, a1 +; RV32-NEXT: srli a3, a1, 4 +; RV32-NEXT: add a1, a1, a3 +; RV32-NEXT: and a1, a1, t2 +; RV32-NEXT: slli a3, a1, 8 +; RV32-NEXT: add a1, a1, a3 +; RV32-NEXT: slli a3, a1, 16 +; RV32-NEXT: add a1, a1, a3 +; RV32-NEXT: srli a1, a1, 24 +; RV32-NEXT: addi a3, a1, 32 +; RV32-NEXT: j .LBB2_11 +; RV32-NEXT: .LBB2_10: +; RV32-NEXT: srli a1, a6, 1 +; RV32-NEXT: or a1, a6, a1 +; RV32-NEXT: srli a3, a1, 2 +; 
RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: srli a3, a1, 4 +; RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: srli a3, a1, 8 +; RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: srli a3, a1, 16 +; RV32-NEXT: or a1, a1, a3 +; RV32-NEXT: not a1, a1 +; RV32-NEXT: srli a3, a1, 1 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: sub a1, a1, a3 +; RV32-NEXT: and a3, a1, t3 +; RV32-NEXT: srli a1, a1, 2 +; RV32-NEXT: and a1, a1, t3 +; RV32-NEXT: add a1, a3, a1 +; RV32-NEXT: srli a3, a1, 4 +; RV32-NEXT: add a1, a1, a3 +; RV32-NEXT: and a1, a1, t2 +; RV32-NEXT: slli a3, a1, 8 +; RV32-NEXT: add a1, a1, a3 +; RV32-NEXT: slli a3, a1, 16 +; RV32-NEXT: add a1, a1, a3 +; RV32-NEXT: srli a3, a1, 24 +; RV32-NEXT: .LBB2_11: # %_udiv-special-cases +; RV32-NEXT: or a1, s9, ra +; RV32-NEXT: or s0, s8, s11 +; RV32-NEXT: or s1, a6, a7 +; RV32-NEXT: or s2, t1, t0 +; RV32-NEXT: sltu t6, a0, t6 +; RV32-NEXT: addi s3, s3, -1 +; RV32-NEXT: addi a0, a3, 64 +; RV32-NEXT: or s4, t0, a7 +; RV32-NEXT: sltu s5, a0, a3 +; RV32-NEXT: snez s6, s4 +; RV32-NEXT: addi s6, s6, -1 +; RV32-NEXT: bnez a7, .LBB2_13 +; RV32-NEXT: # %bb.12: # %_udiv-special-cases +; RV32-NEXT: srli a3, t0, 1 +; RV32-NEXT: or a3, t0, a3 +; RV32-NEXT: srli a4, a3, 2 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 8 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 16 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: not a3, a3 +; RV32-NEXT: srli a4, a3, 1 +; RV32-NEXT: and a4, a4, t4 +; RV32-NEXT: sub a3, a3, a4 +; RV32-NEXT: and a4, a3, t3 +; RV32-NEXT: srli a3, a3, 2 +; RV32-NEXT: and a3, a3, t3 +; RV32-NEXT: add a3, a4, a3 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: and a3, a3, t2 +; RV32-NEXT: slli a4, a3, 8 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: slli a4, a3, 16 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: srli a3, a3, 24 +; RV32-NEXT: addi a3, a3, 32 +; RV32-NEXT: j .LBB2_14 +; RV32-NEXT: .LBB2_13: +; RV32-NEXT: srli a3, a7, 1 +; RV32-NEXT: or a3, a7, a3 +; RV32-NEXT: srli a4, a3, 2 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 8 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 16 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: not a3, a3 +; RV32-NEXT: srli a4, a3, 1 +; RV32-NEXT: and a4, a4, t4 +; RV32-NEXT: sub a3, a3, a4 +; RV32-NEXT: and a4, a3, t3 +; RV32-NEXT: srli a3, a3, 2 +; RV32-NEXT: and a3, a3, t3 +; RV32-NEXT: add a3, a4, a3 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: and a3, a3, t2 +; RV32-NEXT: slli a4, a3, 8 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: slli a4, a3, 16 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: srli a3, a3, 24 +; RV32-NEXT: .LBB2_14: # %_udiv-special-cases +; RV32-NEXT: or s0, s0, a1 +; RV32-NEXT: or a5, s2, s1 +; RV32-NEXT: and a1, s3, t6 +; RV32-NEXT: and a4, s6, s5 +; RV32-NEXT: bnez s4, .LBB2_16 +; RV32-NEXT: # %bb.15: # %_udiv-special-cases +; RV32-NEXT: mv a3, a0 +; RV32-NEXT: .LBB2_16: # %_udiv-special-cases +; RV32-NEXT: seqz a0, s0 +; RV32-NEXT: seqz a5, a5 +; RV32-NEXT: sltu t2, t5, a3 +; RV32-NEXT: sub t4, a1, a4 +; RV32-NEXT: mv t3, t2 +; RV32-NEXT: beq a1, a4, .LBB2_18 +; RV32-NEXT: # %bb.17: # %_udiv-special-cases +; RV32-NEXT: sltu t3, a1, a4 +; RV32-NEXT: .LBB2_18: # %_udiv-special-cases +; RV32-NEXT: sub t2, t4, t2 +; RV32-NEXT: or a0, a0, a5 +; RV32-NEXT: neg t4, t3 +; RV32-NEXT: seqz t6, t3 +; RV32-NEXT: addi t6, t6, -1 +; RV32-NEXT: or a1, t4, t6 +; RV32-NEXT: sub t3, t5, a3 +; RV32-NEXT: beqz a1, .LBB2_20 +; RV32-NEXT: # %bb.19: # 
%_udiv-special-cases +; RV32-NEXT: snez a1, a1 +; RV32-NEXT: j .LBB2_21 +; RV32-NEXT: .LBB2_20: +; RV32-NEXT: snez a1, t2 +; RV32-NEXT: sltiu a3, t3, 128 +; RV32-NEXT: xori a3, a3, 1 +; RV32-NEXT: or a1, a3, a1 +; RV32-NEXT: .LBB2_21: # %_udiv-special-cases +; RV32-NEXT: or a5, a0, a1 +; RV32-NEXT: addi a3, a5, -1 +; RV32-NEXT: and a0, a3, a7 +; RV32-NEXT: and a1, a3, t0 +; RV32-NEXT: and a4, a3, a6 +; RV32-NEXT: and a3, a3, t1 +; RV32-NEXT: bnez a5, .LBB2_26 +; RV32-NEXT: # %bb.22: # %_udiv-special-cases +; RV32-NEXT: xori a5, t3, 127 +; RV32-NEXT: or a5, a5, t4 +; RV32-NEXT: or t5, t2, t6 +; RV32-NEXT: or a5, a5, t5 +; RV32-NEXT: beqz a5, .LBB2_26 +; RV32-NEXT: # %bb.23: # %udiv-bb1 +; RV32-NEXT: sw s7, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: addi a1, t3, 1 +; RV32-NEXT: sw zero, 72(sp) +; RV32-NEXT: sw zero, 76(sp) +; RV32-NEXT: sw zero, 80(sp) +; RV32-NEXT: sw zero, 84(sp) +; RV32-NEXT: sw t1, 88(sp) +; RV32-NEXT: sw a6, 92(sp) +; RV32-NEXT: sw t0, 96(sp) +; RV32-NEXT: sw a7, 100(sp) +; RV32-NEXT: li a0, 127 +; RV32-NEXT: addi a2, sp, 88 +; RV32-NEXT: seqz a3, a1 +; RV32-NEXT: sub a0, a0, t3 +; RV32-NEXT: add t2, t2, a3 +; RV32-NEXT: andi a3, a0, 31 +; RV32-NEXT: srli a0, a0, 3 +; RV32-NEXT: or a4, a1, t2 +; RV32-NEXT: xori a3, a3, 31 +; RV32-NEXT: andi a0, a0, 12 +; RV32-NEXT: seqz t5, a4 +; RV32-NEXT: sub a2, a2, a0 +; RV32-NEXT: add t5, t4, t5 +; RV32-NEXT: lw a0, 0(a2) +; RV32-NEXT: lw a4, 4(a2) +; RV32-NEXT: lw a5, 8(a2) +; RV32-NEXT: lw a2, 12(a2) +; RV32-NEXT: sltu t4, t5, t4 +; RV32-NEXT: or s0, a1, t5 +; RV32-NEXT: add t4, t6, t4 +; RV32-NEXT: or t6, t2, t4 +; RV32-NEXT: or s0, s0, t6 +; RV32-NEXT: srli t6, a5, 1 +; RV32-NEXT: srli s1, a4, 1 +; RV32-NEXT: srli s2, a0, 1 +; RV32-NEXT: srl t6, t6, a3 +; RV32-NEXT: srl s1, s1, a3 +; RV32-NEXT: srl a3, s2, a3 +; RV32-NEXT: not t3, t3 +; RV32-NEXT: sll a2, a2, t3 +; RV32-NEXT: or s2, a2, t6 +; RV32-NEXT: sll a2, a5, t3 +; RV32-NEXT: sll a4, a4, t3 +; RV32-NEXT: or s1, a2, s1 +; RV32-NEXT: or t6, a4, a3 +; RV32-NEXT: sll t3, a0, t3 +; RV32-NEXT: bnez s0, .LBB2_27 +; RV32-NEXT: # %bb.24: +; RV32-NEXT: li s6, 0 +; RV32-NEXT: li s7, 0 +; RV32-NEXT: li s8, 0 +; RV32-NEXT: .LBB2_25: # %udiv-loop-exit +; RV32-NEXT: srli a0, s1, 31 +; RV32-NEXT: slli s2, s2, 1 +; RV32-NEXT: or a0, s2, a0 +; RV32-NEXT: srli a1, t6, 31 +; RV32-NEXT: slli s1, s1, 1 +; RV32-NEXT: or a1, s1, a1 +; RV32-NEXT: srli a2, t3, 31 +; RV32-NEXT: slli t6, t6, 1 +; RV32-NEXT: slli a3, t3, 1 +; RV32-NEXT: or a3, s0, a3 +; RV32-NEXT: or a2, s6, a2 +; RV32-NEXT: or a4, a2, t6 +; RV32-NEXT: or a1, s7, a1 +; RV32-NEXT: or a0, s8, a0 +; RV32-NEXT: lw s7, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .LBB2_26: # %udiv-end +; RV32-NEXT: sw a3, 0(s7) +; RV32-NEXT: sw a4, 4(s7) +; RV32-NEXT: sw a1, 8(s7) +; RV32-NEXT: sw a0, 12(s7) +; RV32-NEXT: lw ra, 156(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 152(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s1, 148(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s2, 144(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s3, 140(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s4, 136(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s5, 132(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s6, 128(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s7, 124(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s8, 120(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s9, 116(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s10, 112(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s11, 108(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 160 +; RV32-NEXT: ret +; RV32-NEXT: .LBB2_27: # 
%udiv-preheader +; RV32-NEXT: li s0, 0 +; RV32-NEXT: li s5, 0 +; RV32-NEXT: li s3, 0 +; RV32-NEXT: li s4, 0 +; RV32-NEXT: sw zero, 56(sp) +; RV32-NEXT: sw zero, 60(sp) +; RV32-NEXT: sw zero, 64(sp) +; RV32-NEXT: sw zero, 68(sp) +; RV32-NEXT: sw t1, 40(sp) +; RV32-NEXT: sw a6, 44(sp) +; RV32-NEXT: sw t0, 48(sp) +; RV32-NEXT: sw a7, 52(sp) +; RV32-NEXT: srli a0, a1, 3 +; RV32-NEXT: addi a2, sp, 40 +; RV32-NEXT: andi a0, a0, 12 +; RV32-NEXT: add a0, a2, a0 +; RV32-NEXT: lw a2, 4(a0) +; RV32-NEXT: lw a3, 8(a0) +; RV32-NEXT: lw a4, 12(a0) +; RV32-NEXT: lw a0, 0(a0) +; RV32-NEXT: andi a5, a1, 31 +; RV32-NEXT: xori a5, a5, 31 +; RV32-NEXT: slli a6, a4, 1 +; RV32-NEXT: slli a7, a3, 1 +; RV32-NEXT: slli t0, a2, 1 +; RV32-NEXT: sll a6, a6, a5 +; RV32-NEXT: sll a7, a7, a5 +; RV32-NEXT: sll a5, t0, a5 +; RV32-NEXT: seqz t0, s8 +; RV32-NEXT: srl a3, a3, a1 +; RV32-NEXT: or s10, a3, a6 +; RV32-NEXT: or a3, s8, s9 +; RV32-NEXT: sw s9, 32(sp) # 4-byte Folded Spill +; RV32-NEXT: sub a6, s9, t0 +; RV32-NEXT: seqz a3, a3 +; RV32-NEXT: srl a2, a2, a1 +; RV32-NEXT: or s9, a2, a7 +; RV32-NEXT: sub a7, s11, a3 +; RV32-NEXT: sw s11, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: sltu a2, s11, a3 +; RV32-NEXT: sw ra, 24(sp) # 4-byte Folded Spill +; RV32-NEXT: sub a2, ra, a2 +; RV32-NEXT: sw a2, 20(sp) # 4-byte Folded Spill +; RV32-NEXT: srl a0, a0, a1 +; RV32-NEXT: srl ra, a4, a1 +; RV32-NEXT: or t1, a0, a5 +; RV32-NEXT: sw s8, 36(sp) # 4-byte Folded Spill +; RV32-NEXT: addi s8, s8, -1 +; RV32-NEXT: sw s8, 16(sp) # 4-byte Folded Spill +; RV32-NEXT: li s7, 0 +; RV32-NEXT: li s8, 0 +; RV32-NEXT: j .LBB2_29 +; RV32-NEXT: .LBB2_28: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB2_29 Depth=1 +; RV32-NEXT: li s6, 0 +; RV32-NEXT: sub a0, a0, a5 +; RV32-NEXT: srli a5, s1, 31 +; RV32-NEXT: slli s2, s2, 1 +; RV32-NEXT: or a5, s2, a5 +; RV32-NEXT: srli s2, t6, 31 +; RV32-NEXT: slli s1, s1, 1 +; RV32-NEXT: or s1, s1, s2 +; RV32-NEXT: srli s2, t3, 31 +; RV32-NEXT: slli t6, t6, 1 +; RV32-NEXT: slli t3, t3, 1 +; RV32-NEXT: or t6, t6, s2 +; RV32-NEXT: lw a2, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: and s2, s10, a2 +; RV32-NEXT: or t3, s0, t3 +; RV32-NEXT: sub a2, a3, s2 +; RV32-NEXT: sltu a3, a3, s2 +; RV32-NEXT: lw t0, 24(sp) # 4-byte Folded Reload +; RV32-NEXT: and s0, s10, t0 +; RV32-NEXT: sub t0, s9, s0 +; RV32-NEXT: or s2, a1, t2 +; RV32-NEXT: sub s9, a0, a4 +; RV32-NEXT: seqz a0, a1 +; RV32-NEXT: sub t2, t2, a0 +; RV32-NEXT: or t6, s5, t6 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: andi s0, s10, 1 +; RV32-NEXT: seqz a0, s2 +; RV32-NEXT: or s1, s3, s1 +; RV32-NEXT: or s2, s4, a5 +; RV32-NEXT: sub s10, a2, ra +; RV32-NEXT: sltu a2, a2, ra +; RV32-NEXT: sub a3, t0, a3 +; RV32-NEXT: sltu a4, t5, a0 +; RV32-NEXT: sub t5, t5, a0 +; RV32-NEXT: sub ra, a3, a2 +; RV32-NEXT: sub t4, t4, a4 +; RV32-NEXT: or a0, t2, t4 +; RV32-NEXT: or a2, a1, t5 +; RV32-NEXT: or a0, a2, a0 +; RV32-NEXT: sub t1, s11, t1 +; RV32-NEXT: li s5, 0 +; RV32-NEXT: li s3, 0 +; RV32-NEXT: li s4, 0 +; RV32-NEXT: beqz a0, .LBB2_25 +; RV32-NEXT: .LBB2_29: # %udiv-do-while +; RV32-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32-NEXT: srli a0, t1, 31 +; RV32-NEXT: slli a3, s9, 1 +; RV32-NEXT: slli t1, t1, 1 +; RV32-NEXT: or a0, a3, a0 +; RV32-NEXT: srli a3, s2, 31 +; RV32-NEXT: or s11, t1, a3 +; RV32-NEXT: beq a6, a0, .LBB2_31 +; RV32-NEXT: # %bb.30: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB2_29 Depth=1 +; RV32-NEXT: sltu a4, a6, a0 +; RV32-NEXT: j .LBB2_32 +; RV32-NEXT: .LBB2_31: # in Loop: Header=BB2_29 Depth=1 +; RV32-NEXT: lw a2, 16(sp) # 4-byte 
Folded Reload +; RV32-NEXT: sltu a4, a2, s11 +; RV32-NEXT: .LBB2_32: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB2_29 Depth=1 +; RV32-NEXT: lw a2, 36(sp) # 4-byte Folded Reload +; RV32-NEXT: srli a3, s10, 31 +; RV32-NEXT: slli ra, ra, 1 +; RV32-NEXT: srli a5, s9, 31 +; RV32-NEXT: slli s10, s10, 1 +; RV32-NEXT: or s9, ra, a3 +; RV32-NEXT: or a3, s10, a5 +; RV32-NEXT: sub a5, a7, a3 +; RV32-NEXT: sltu t1, a7, a3 +; RV32-NEXT: lw t0, 20(sp) # 4-byte Folded Reload +; RV32-NEXT: sub s6, t0, s9 +; RV32-NEXT: sltu a4, a5, a4 +; RV32-NEXT: sub a5, s6, t1 +; RV32-NEXT: sub a5, a5, a4 +; RV32-NEXT: srai s10, a5, 31 +; RV32-NEXT: and t1, s10, a2 +; RV32-NEXT: lw a2, 32(sp) # 4-byte Folded Reload +; RV32-NEXT: and a5, s10, a2 +; RV32-NEXT: sltu a4, s11, t1 +; RV32-NEXT: mv ra, a4 +; RV32-NEXT: beq a0, a5, .LBB2_28 +; RV32-NEXT: # %bb.33: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB2_29 Depth=1 +; RV32-NEXT: sltu ra, a0, a5 +; RV32-NEXT: j .LBB2_28 +; +; RV64-LABEL: udiv_i128: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64-NEXT: call __udivti3 +; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret %res = udiv i128 %x, %y ret i128 %res } define i129 @udiv_i129(i129 %x, i129 %y) nounwind { -; CHECK-LABEL: udiv_i129: -; CHECK-NOT: call{{.*}}div +; RV32-LABEL: udiv_i129: +; RV32: # %bb.0: # %_udiv-special-cases +; RV32-NEXT: addi sp, sp, -240 +; RV32-NEXT: sw ra, 236(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 232(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s1, 228(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s2, 224(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s3, 220(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s4, 216(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s5, 212(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s6, 208(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s7, 204(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s8, 200(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s9, 196(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s10, 192(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s11, 188(sp) # 4-byte Folded Spill +; RV32-NEXT: mv ra, a0 +; RV32-NEXT: lw t2, 16(a2) +; RV32-NEXT: lw a4, 0(a2) +; RV32-NEXT: lw a5, 4(a2) +; RV32-NEXT: lw a6, 8(a2) +; RV32-NEXT: lw a0, 12(a2) +; RV32-NEXT: sw a0, 24(sp) # 4-byte Folded Spill +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: lui a3, 61681 +; RV32-NEXT: addi t5, a0, 1365 +; RV32-NEXT: addi t4, a2, 819 +; RV32-NEXT: addi t3, a3, -241 +; RV32-NEXT: sw a6, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: slli a0, a6, 31 +; RV32-NEXT: srli a2, a5, 1 +; RV32-NEXT: sw a5, 20(sp) # 4-byte Folded Spill +; RV32-NEXT: slli a3, a5, 31 +; RV32-NEXT: or a0, a2, a0 +; RV32-NEXT: sw a4, 32(sp) # 4-byte Folded Spill +; RV32-NEXT: srli a2, a4, 1 +; RV32-NEXT: or a2, a2, a3 +; RV32-NEXT: bnez a0, .LBB3_2 +; RV32-NEXT: # %bb.1: # %_udiv-special-cases +; RV32-NEXT: srli a3, a2, 1 +; RV32-NEXT: or a3, a2, a3 +; RV32-NEXT: srli a4, a3, 2 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 8 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 16 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: not a3, a3 +; RV32-NEXT: srli a4, a3, 1 +; RV32-NEXT: and a4, a4, t5 +; RV32-NEXT: sub a3, a3, a4 +; RV32-NEXT: and a4, a3, t4 +; RV32-NEXT: srli a3, a3, 2 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: add a3, a4, a3 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: and a3, a3, t3 +; RV32-NEXT: slli a4, a3, 8 +; 
RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: slli a4, a3, 16 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: srli a3, a3, 24 +; RV32-NEXT: addi a6, a3, 32 +; RV32-NEXT: j .LBB3_3 +; RV32-NEXT: .LBB3_2: +; RV32-NEXT: srli a3, a0, 1 +; RV32-NEXT: or a3, a0, a3 +; RV32-NEXT: srli a4, a3, 2 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 8 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 16 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: not a3, a3 +; RV32-NEXT: srli a4, a3, 1 +; RV32-NEXT: and a4, a4, t5 +; RV32-NEXT: sub a3, a3, a4 +; RV32-NEXT: and a4, a3, t4 +; RV32-NEXT: srli a3, a3, 2 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: add a3, a4, a3 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: and a3, a3, t3 +; RV32-NEXT: slli a4, a3, 8 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: slli a4, a3, 16 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: srli a6, a3, 24 +; RV32-NEXT: .LBB3_3: # %_udiv-special-cases +; RV32-NEXT: lw a7, 24(sp) # 4-byte Folded Reload +; RV32-NEXT: srli a3, a7, 1 +; RV32-NEXT: slli a5, t2, 31 +; RV32-NEXT: slli a7, a7, 31 +; RV32-NEXT: lw a4, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: srli t0, a4, 1 +; RV32-NEXT: lw a4, 32(sp) # 4-byte Folded Reload +; RV32-NEXT: slli a4, a4, 31 +; RV32-NEXT: li s2, 64 +; RV32-NEXT: bnez a4, .LBB3_5 +; RV32-NEXT: # %bb.4: # %_udiv-special-cases +; RV32-NEXT: li t6, 64 +; RV32-NEXT: j .LBB3_6 +; RV32-NEXT: .LBB3_5: +; RV32-NEXT: srli t1, a4, 1 +; RV32-NEXT: or t1, a4, t1 +; RV32-NEXT: srli t6, t1, 2 +; RV32-NEXT: or t1, t1, t6 +; RV32-NEXT: srli t6, t1, 4 +; RV32-NEXT: or t1, t1, t6 +; RV32-NEXT: srli t6, t1, 8 +; RV32-NEXT: or t1, t1, t6 +; RV32-NEXT: srli t6, t1, 16 +; RV32-NEXT: or t1, t1, t6 +; RV32-NEXT: not t1, t1 +; RV32-NEXT: srli t6, t1, 1 +; RV32-NEXT: and t6, t6, t5 +; RV32-NEXT: sub t1, t1, t6 +; RV32-NEXT: and t6, t1, t4 +; RV32-NEXT: srli t1, t1, 2 +; RV32-NEXT: and t1, t1, t4 +; RV32-NEXT: add t1, t6, t1 +; RV32-NEXT: srli t6, t1, 4 +; RV32-NEXT: add t1, t1, t6 +; RV32-NEXT: and t1, t1, t3 +; RV32-NEXT: slli t6, t1, 8 +; RV32-NEXT: add t1, t1, t6 +; RV32-NEXT: slli t6, t1, 16 +; RV32-NEXT: add t1, t1, t6 +; RV32-NEXT: srli t6, t1, 24 +; RV32-NEXT: .LBB3_6: # %_udiv-special-cases +; RV32-NEXT: or t1, a5, a3 +; RV32-NEXT: or a7, t0, a7 +; RV32-NEXT: bnez a4, .LBB3_8 +; RV32-NEXT: # %bb.7: # %_udiv-special-cases +; RV32-NEXT: li t6, 128 +; RV32-NEXT: .LBB3_8: # %_udiv-special-cases +; RV32-NEXT: or a5, a7, t1 +; RV32-NEXT: addi a4, a6, 64 +; RV32-NEXT: addi a3, t6, 128 +; RV32-NEXT: or a0, a0, t1 +; RV32-NEXT: or a2, a2, a7 +; RV32-NEXT: or s3, a2, a0 +; RV32-NEXT: sltu s0, a3, t6 +; RV32-NEXT: bnez s3, .LBB3_11 +; RV32-NEXT: # %bb.9: # %_udiv-special-cases +; RV32-NEXT: mv t6, s0 +; RV32-NEXT: beqz t1, .LBB3_12 +; RV32-NEXT: .LBB3_10: +; RV32-NEXT: srli a0, t1, 1 +; RV32-NEXT: or a0, t1, a0 +; RV32-NEXT: srli a2, a0, 2 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: srli a2, a0, 4 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: srli a2, a0, 8 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: srli a2, a0, 16 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: not a0, a0 +; RV32-NEXT: srli a2, a0, 1 +; RV32-NEXT: and a2, a2, t5 +; RV32-NEXT: sub a0, a0, a2 +; RV32-NEXT: and a2, a0, t4 +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: and a0, a0, t4 +; RV32-NEXT: add a0, a2, a0 +; RV32-NEXT: srli a2, a0, 4 +; RV32-NEXT: add a0, a0, a2 +; RV32-NEXT: and a0, a0, t3 +; RV32-NEXT: slli a2, a0, 8 +; RV32-NEXT: add a0, a0, a2 +; RV32-NEXT: slli a2, a0, 16 +; RV32-NEXT: add a0, a0, a2 +; RV32-NEXT: srli 
s1, a0, 24 +; RV32-NEXT: beqz a5, .LBB3_13 +; RV32-NEXT: j .LBB3_14 +; RV32-NEXT: .LBB3_11: +; RV32-NEXT: snez a0, a5 +; RV32-NEXT: sltu a2, a4, a6 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and t6, a0, a2 +; RV32-NEXT: bnez t1, .LBB3_10 +; RV32-NEXT: .LBB3_12: # %_udiv-special-cases +; RV32-NEXT: srli a0, a7, 1 +; RV32-NEXT: or a0, a7, a0 +; RV32-NEXT: srli a2, a0, 2 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: srli a2, a0, 4 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: srli a2, a0, 8 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: srli a2, a0, 16 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: not a0, a0 +; RV32-NEXT: srli a2, a0, 1 +; RV32-NEXT: and a2, a2, t5 +; RV32-NEXT: sub a0, a0, a2 +; RV32-NEXT: and a2, a0, t4 +; RV32-NEXT: srli a0, a0, 2 +; RV32-NEXT: and a0, a0, t4 +; RV32-NEXT: add a0, a2, a0 +; RV32-NEXT: srli a2, a0, 4 +; RV32-NEXT: add a0, a0, a2 +; RV32-NEXT: and a0, a0, t3 +; RV32-NEXT: slli a2, a0, 8 +; RV32-NEXT: add a0, a0, a2 +; RV32-NEXT: slli a2, a0, 16 +; RV32-NEXT: add a0, a0, a2 +; RV32-NEXT: srli a0, a0, 24 +; RV32-NEXT: addi s1, a0, 32 +; RV32-NEXT: bnez a5, .LBB3_14 +; RV32-NEXT: .LBB3_13: # %_udiv-special-cases +; RV32-NEXT: mv s1, a4 +; RV32-NEXT: .LBB3_14: # %_udiv-special-cases +; RV32-NEXT: lw a7, 0(a1) +; RV32-NEXT: lw t0, 4(a1) +; RV32-NEXT: lw a6, 8(a1) +; RV32-NEXT: bnez s3, .LBB3_16 +; RV32-NEXT: # %bb.15: # %_udiv-special-cases +; RV32-NEXT: mv s1, a3 +; RV32-NEXT: .LBB3_16: # %_udiv-special-cases +; RV32-NEXT: lw t1, 12(a1) +; RV32-NEXT: lw a1, 16(a1) +; RV32-NEXT: slli a0, a6, 31 +; RV32-NEXT: srli a2, t0, 1 +; RV32-NEXT: or a0, a2, a0 +; RV32-NEXT: slli a2, t0, 31 +; RV32-NEXT: srli a3, a7, 1 +; RV32-NEXT: or a2, a3, a2 +; RV32-NEXT: bnez a0, .LBB3_18 +; RV32-NEXT: # %bb.17: # %_udiv-special-cases +; RV32-NEXT: srli a3, a2, 1 +; RV32-NEXT: or a3, a2, a3 +; RV32-NEXT: srli a4, a3, 2 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 8 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 16 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: not a3, a3 +; RV32-NEXT: srli a4, a3, 1 +; RV32-NEXT: and a4, a4, t5 +; RV32-NEXT: sub a3, a3, a4 +; RV32-NEXT: and a4, a3, t4 +; RV32-NEXT: srli a3, a3, 2 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: add a3, a4, a3 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: and a3, a3, t3 +; RV32-NEXT: slli a4, a3, 8 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: slli a4, a3, 16 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: srli a3, a3, 24 +; RV32-NEXT: addi s5, a3, 32 +; RV32-NEXT: j .LBB3_19 +; RV32-NEXT: .LBB3_18: +; RV32-NEXT: srli a3, a0, 1 +; RV32-NEXT: or a3, a0, a3 +; RV32-NEXT: srli a4, a3, 2 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 8 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: srli a4, a3, 16 +; RV32-NEXT: or a3, a3, a4 +; RV32-NEXT: not a3, a3 +; RV32-NEXT: srli a4, a3, 1 +; RV32-NEXT: and a4, a4, t5 +; RV32-NEXT: sub a3, a3, a4 +; RV32-NEXT: and a4, a3, t4 +; RV32-NEXT: srli a3, a3, 2 +; RV32-NEXT: and a3, a3, t4 +; RV32-NEXT: add a3, a4, a3 +; RV32-NEXT: srli a4, a3, 4 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: and a3, a3, t3 +; RV32-NEXT: slli a4, a3, 8 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: slli a4, a3, 16 +; RV32-NEXT: add a3, a3, a4 +; RV32-NEXT: srli s5, a3, 24 +; RV32-NEXT: .LBB3_19: # %_udiv-special-cases +; RV32-NEXT: srli a3, t1, 1 +; RV32-NEXT: slli a4, a1, 31 +; RV32-NEXT: slli a5, t1, 31 +; RV32-NEXT: slli s4, a7, 31 +; RV32-NEXT: srli s6, a6, 1 +; RV32-NEXT: beqz s4, 
.LBB3_21 +; RV32-NEXT: # %bb.20: +; RV32-NEXT: srli s2, s4, 1 +; RV32-NEXT: or s2, s4, s2 +; RV32-NEXT: srli s7, s2, 2 +; RV32-NEXT: or s2, s2, s7 +; RV32-NEXT: srli s7, s2, 4 +; RV32-NEXT: or s2, s2, s7 +; RV32-NEXT: srli s7, s2, 8 +; RV32-NEXT: or s2, s2, s7 +; RV32-NEXT: srli s7, s2, 16 +; RV32-NEXT: or s2, s2, s7 +; RV32-NEXT: not s2, s2 +; RV32-NEXT: srli s7, s2, 1 +; RV32-NEXT: and s7, s7, t5 +; RV32-NEXT: sub s2, s2, s7 +; RV32-NEXT: and s7, s2, t4 +; RV32-NEXT: srli s2, s2, 2 +; RV32-NEXT: and s2, s2, t4 +; RV32-NEXT: add s2, s7, s2 +; RV32-NEXT: srli s7, s2, 4 +; RV32-NEXT: add s2, s2, s7 +; RV32-NEXT: and s2, s2, t3 +; RV32-NEXT: slli s7, s2, 8 +; RV32-NEXT: add s2, s2, s7 +; RV32-NEXT: slli s7, s2, 16 +; RV32-NEXT: add s2, s2, s7 +; RV32-NEXT: srli s2, s2, 24 +; RV32-NEXT: .LBB3_21: # %_udiv-special-cases +; RV32-NEXT: or s7, a4, a3 +; RV32-NEXT: or s6, s6, a5 +; RV32-NEXT: bnez s4, .LBB3_23 +; RV32-NEXT: # %bb.22: # %_udiv-special-cases +; RV32-NEXT: li s2, 128 +; RV32-NEXT: .LBB3_23: # %_udiv-special-cases +; RV32-NEXT: or s4, s6, s7 +; RV32-NEXT: addi a5, s5, 64 +; RV32-NEXT: addi a3, s2, 128 +; RV32-NEXT: or a0, a0, s7 +; RV32-NEXT: or a4, a2, s6 +; RV32-NEXT: or a4, a4, a0 +; RV32-NEXT: sltu a0, a3, s2 +; RV32-NEXT: bnez a4, .LBB3_26 +; RV32-NEXT: # %bb.24: # %_udiv-special-cases +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: snez s2, s3 +; RV32-NEXT: beqz s7, .LBB3_27 +; RV32-NEXT: .LBB3_25: +; RV32-NEXT: srli s3, s7, 1 +; RV32-NEXT: or s3, s7, s3 +; RV32-NEXT: srli s5, s3, 2 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: srli s5, s3, 4 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: srli s5, s3, 8 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: srli s5, s3, 16 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: not s3, s3 +; RV32-NEXT: srli s5, s3, 1 +; RV32-NEXT: and t5, s5, t5 +; RV32-NEXT: sub t5, s3, t5 +; RV32-NEXT: and s3, t5, t4 +; RV32-NEXT: srli t5, t5, 2 +; RV32-NEXT: and t4, t5, t4 +; RV32-NEXT: add t4, s3, t4 +; RV32-NEXT: srli t5, t4, 4 +; RV32-NEXT: add t4, t4, t5 +; RV32-NEXT: and t3, t4, t3 +; RV32-NEXT: slli t4, t3, 8 +; RV32-NEXT: add t3, t3, t4 +; RV32-NEXT: slli t4, t3, 16 +; RV32-NEXT: add t3, t3, t4 +; RV32-NEXT: srli t3, t3, 24 +; RV32-NEXT: j .LBB3_28 +; RV32-NEXT: .LBB3_26: +; RV32-NEXT: snez a2, s4 +; RV32-NEXT: sltu s2, a5, s5 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a2, a2, s2 +; RV32-NEXT: snez s2, s3 +; RV32-NEXT: bnez s7, .LBB3_25 +; RV32-NEXT: .LBB3_27: # %_udiv-special-cases +; RV32-NEXT: srli s3, s6, 1 +; RV32-NEXT: or s3, s6, s3 +; RV32-NEXT: srli s5, s3, 2 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: srli s5, s3, 4 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: srli s5, s3, 8 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: srli s5, s3, 16 +; RV32-NEXT: or s3, s3, s5 +; RV32-NEXT: not s3, s3 +; RV32-NEXT: srli s5, s3, 1 +; RV32-NEXT: and t5, s5, t5 +; RV32-NEXT: sub t5, s3, t5 +; RV32-NEXT: and s3, t5, t4 +; RV32-NEXT: srli t5, t5, 2 +; RV32-NEXT: and t4, t5, t4 +; RV32-NEXT: add t4, s3, t4 +; RV32-NEXT: srli t5, t4, 4 +; RV32-NEXT: add t4, t4, t5 +; RV32-NEXT: and t3, t4, t3 +; RV32-NEXT: slli t4, t3, 8 +; RV32-NEXT: add t3, t3, t4 +; RV32-NEXT: slli t4, t3, 16 +; RV32-NEXT: add t3, t3, t4 +; RV32-NEXT: srli t3, t3, 24 +; RV32-NEXT: addi t3, t3, 32 +; RV32-NEXT: .LBB3_28: # %_udiv-special-cases +; RV32-NEXT: xori t4, s0, 1 +; RV32-NEXT: addi s2, s2, -1 +; RV32-NEXT: bnez s4, .LBB3_30 +; RV32-NEXT: # %bb.29: # %_udiv-special-cases +; RV32-NEXT: mv t3, a5 +; RV32-NEXT: .LBB3_30: # %_udiv-special-cases +; RV32-NEXT: andi s11, a1, 1 +; RV32-NEXT: andi s8, t2, 1 +; RV32-NEXT: lw a1, 
32(sp) # 4-byte Folded Reload +; RV32-NEXT: lw a5, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: or s9, a1, a5 +; RV32-NEXT: or t2, a7, a6 +; RV32-NEXT: neg a1, t4 +; RV32-NEXT: and s0, s2, s0 +; RV32-NEXT: bnez a4, .LBB3_32 +; RV32-NEXT: # %bb.31: # %_udiv-special-cases +; RV32-NEXT: mv t3, a3 +; RV32-NEXT: .LBB3_32: # %_udiv-special-cases +; RV32-NEXT: lw a3, 20(sp) # 4-byte Folded Reload +; RV32-NEXT: lw a5, 24(sp) # 4-byte Folded Reload +; RV32-NEXT: or s10, a3, a5 +; RV32-NEXT: or a5, s9, s8 +; RV32-NEXT: or t4, t0, t1 +; RV32-NEXT: or t5, t2, s11 +; RV32-NEXT: and a1, s0, a1 +; RV32-NEXT: xori a3, a0, 1 +; RV32-NEXT: snez a4, a4 +; RV32-NEXT: neg a3, a3 +; RV32-NEXT: addi a4, a4, -1 +; RV32-NEXT: and a0, a4, a0 +; RV32-NEXT: sltu a4, s1, t3 +; RV32-NEXT: and t2, a0, a3 +; RV32-NEXT: mv a3, a4 +; RV32-NEXT: beq t6, a2, .LBB3_34 +; RV32-NEXT: # %bb.33: # %_udiv-special-cases +; RV32-NEXT: sltu a3, t6, a2 +; RV32-NEXT: .LBB3_34: # %_udiv-special-cases +; RV32-NEXT: or a0, a5, s10 +; RV32-NEXT: or t5, t5, t4 +; RV32-NEXT: sltu t4, a1, t2 +; RV32-NEXT: mv s0, a3 +; RV32-NEXT: beq a1, t2, .LBB3_36 +; RV32-NEXT: # %bb.35: # %_udiv-special-cases +; RV32-NEXT: mv s0, t4 +; RV32-NEXT: .LBB3_36: # %_udiv-special-cases +; RV32-NEXT: seqz a5, a0 +; RV32-NEXT: seqz t5, t5 +; RV32-NEXT: andi a0, s0, 1 +; RV32-NEXT: sub a2, t6, a2 +; RV32-NEXT: sub a1, a1, t2 +; RV32-NEXT: sub t2, a2, a4 +; RV32-NEXT: sltu a2, a1, a3 +; RV32-NEXT: add a2, t4, a2 +; RV32-NEXT: neg t4, a2 +; RV32-NEXT: sub a4, a1, a3 +; RV32-NEXT: or a1, a4, t4 +; RV32-NEXT: sub a3, s1, t3 +; RV32-NEXT: beqz a1, .LBB3_38 +; RV32-NEXT: # %bb.37: # %_udiv-special-cases +; RV32-NEXT: snez a1, a1 +; RV32-NEXT: or a2, a5, t5 +; RV32-NEXT: bnez a0, .LBB3_39 +; RV32-NEXT: j .LBB3_40 +; RV32-NEXT: .LBB3_38: +; RV32-NEXT: snez a1, t2 +; RV32-NEXT: sltiu a2, a3, 129 +; RV32-NEXT: xori a2, a2, 1 +; RV32-NEXT: or a1, a2, a1 +; RV32-NEXT: or a2, a5, t5 +; RV32-NEXT: beqz a0, .LBB3_40 +; RV32-NEXT: .LBB3_39: # %_udiv-special-cases +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: .LBB3_40: # %_udiv-special-cases +; RV32-NEXT: or t6, a2, a1 +; RV32-NEXT: addi a1, t6, -1 +; RV32-NEXT: and a2, s11, a1 +; RV32-NEXT: and a5, a1, t1 +; RV32-NEXT: and t3, a1, a6 +; RV32-NEXT: and t5, a1, t0 +; RV32-NEXT: and a1, a1, a7 +; RV32-NEXT: bnez t6, .LBB3_57 +; RV32-NEXT: # %bb.41: # %_udiv-special-cases +; RV32-NEXT: or t6, t2, t4 +; RV32-NEXT: xori s0, a3, 128 +; RV32-NEXT: or s0, s0, a0 +; RV32-NEXT: or s0, s0, a4 +; RV32-NEXT: or t6, s0, t6 +; RV32-NEXT: beqz t6, .LBB3_57 +; RV32-NEXT: # %bb.42: # %udiv-bb1 +; RV32-NEXT: sw ra, 8(sp) # 4-byte Folded Spill +; RV32-NEXT: addi a1, a3, 1 +; RV32-NEXT: sw zero, 136(sp) +; RV32-NEXT: sw zero, 140(sp) +; RV32-NEXT: sw zero, 144(sp) +; RV32-NEXT: sw zero, 148(sp) +; RV32-NEXT: sw zero, 120(sp) +; RV32-NEXT: sw zero, 124(sp) +; RV32-NEXT: sw zero, 128(sp) +; RV32-NEXT: sw zero, 132(sp) +; RV32-NEXT: sw a7, 152(sp) +; RV32-NEXT: sw t0, 156(sp) +; RV32-NEXT: sw a6, 160(sp) +; RV32-NEXT: sw t1, 164(sp) +; RV32-NEXT: sw s11, 168(sp) +; RV32-NEXT: li a5, 128 +; RV32-NEXT: addi t3, sp, 152 +; RV32-NEXT: neg a2, a3 +; RV32-NEXT: seqz t5, a1 +; RV32-NEXT: sub a5, a5, a3 +; RV32-NEXT: add t2, t2, t5 +; RV32-NEXT: andi a3, a5, 31 +; RV32-NEXT: srli t5, a5, 3 +; RV32-NEXT: or t6, a1, t2 +; RV32-NEXT: xori a5, a3, 31 +; RV32-NEXT: andi a3, t5, 28 +; RV32-NEXT: seqz t6, t6 +; RV32-NEXT: sub ra, t3, a3 +; RV32-NEXT: add t6, a4, t6 +; RV32-NEXT: lw t3, 0(ra) +; RV32-NEXT: lw s0, 4(ra) +; RV32-NEXT: lw s1, 8(ra) +; RV32-NEXT: lw a3, 12(ra) +; 
RV32-NEXT: sltu a4, t6, a4 +; RV32-NEXT: or t5, a1, t6 +; RV32-NEXT: add t4, t4, a4 +; RV32-NEXT: or a4, t2, t4 +; RV32-NEXT: or a4, t5, a4 +; RV32-NEXT: srli t5, s1, 1 +; RV32-NEXT: seqz s2, a4 +; RV32-NEXT: add a0, a0, s2 +; RV32-NEXT: sll s2, a3, a2 +; RV32-NEXT: srl t5, t5, a5 +; RV32-NEXT: or t5, s2, t5 +; RV32-NEXT: srli s2, s0, 1 +; RV32-NEXT: sll s1, s1, a2 +; RV32-NEXT: srl s2, s2, a5 +; RV32-NEXT: or s2, s1, s2 +; RV32-NEXT: srli s1, t3, 1 +; RV32-NEXT: sll s0, s0, a2 +; RV32-NEXT: srl s1, s1, a5 +; RV32-NEXT: andi s3, a0, 1 +; RV32-NEXT: or s1, s0, s1 +; RV32-NEXT: or a0, a4, s3 +; RV32-NEXT: sll t3, t3, a2 +; RV32-NEXT: beqz a0, .LBB3_55 +; RV32-NEXT: # %bb.43: # %udiv-preheader +; RV32-NEXT: sw zero, 52(sp) # 4-byte Folded Spill +; RV32-NEXT: sw zero, 48(sp) # 4-byte Folded Spill +; RV32-NEXT: sw zero, 44(sp) # 4-byte Folded Spill +; RV32-NEXT: sw zero, 40(sp) # 4-byte Folded Spill +; RV32-NEXT: li s7, 0 +; RV32-NEXT: srli a3, a3, 1 +; RV32-NEXT: lw a0, 16(ra) +; RV32-NEXT: sw zero, 104(sp) +; RV32-NEXT: sw zero, 108(sp) +; RV32-NEXT: sw zero, 112(sp) +; RV32-NEXT: sw zero, 116(sp) +; RV32-NEXT: sw zero, 88(sp) +; RV32-NEXT: sw zero, 92(sp) +; RV32-NEXT: sw zero, 96(sp) +; RV32-NEXT: sw zero, 100(sp) +; RV32-NEXT: sw s11, 72(sp) +; RV32-NEXT: sw zero, 76(sp) +; RV32-NEXT: sw zero, 80(sp) +; RV32-NEXT: sw zero, 84(sp) +; RV32-NEXT: sw a7, 56(sp) +; RV32-NEXT: sw t0, 60(sp) +; RV32-NEXT: sw a6, 64(sp) +; RV32-NEXT: sw t1, 68(sp) +; RV32-NEXT: srli a4, a1, 3 +; RV32-NEXT: addi a6, sp, 56 +; RV32-NEXT: andi a7, a1, 31 +; RV32-NEXT: or t0, s9, s10 +; RV32-NEXT: srl a3, a3, a5 +; RV32-NEXT: andi a4, a4, 28 +; RV32-NEXT: xori a5, a7, 31 +; RV32-NEXT: snez a7, t0 +; RV32-NEXT: add a4, a6, a4 +; RV32-NEXT: add a7, s8, a7 +; RV32-NEXT: lw a6, 16(a4) +; RV32-NEXT: lw t0, 0(a4) +; RV32-NEXT: lw t1, 4(a4) +; RV32-NEXT: lw s0, 8(a4) +; RV32-NEXT: lw a4, 12(a4) +; RV32-NEXT: sll a0, a0, a2 +; RV32-NEXT: or a3, a0, a3 +; RV32-NEXT: slli a6, a6, 1 +; RV32-NEXT: slli a0, a4, 1 +; RV32-NEXT: slli a2, s0, 1 +; RV32-NEXT: slli s4, t1, 1 +; RV32-NEXT: sll a6, a6, a5 +; RV32-NEXT: sll a0, a0, a5 +; RV32-NEXT: sll s8, a2, a5 +; RV32-NEXT: sll s4, s4, a5 +; RV32-NEXT: srl a2, a4, a1 +; RV32-NEXT: or ra, a2, a6 +; RV32-NEXT: lw a6, 32(sp) # 4-byte Folded Reload +; RV32-NEXT: seqz a4, a6 +; RV32-NEXT: srl a2, s0, a1 +; RV32-NEXT: or a2, a2, a0 +; RV32-NEXT: lw a5, 20(sp) # 4-byte Folded Reload +; RV32-NEXT: or a0, a6, a5 +; RV32-NEXT: sub s5, a5, a4 +; RV32-NEXT: seqz a4, a0 +; RV32-NEXT: srl a0, t1, a1 +; RV32-NEXT: or a0, a0, s8 +; RV32-NEXT: lw a5, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: sub t1, a5, a4 +; RV32-NEXT: sw t1, 36(sp) # 4-byte Folded Spill +; RV32-NEXT: sltu a4, a5, a4 +; RV32-NEXT: addi a7, a7, 1 +; RV32-NEXT: lw a5, 24(sp) # 4-byte Folded Reload +; RV32-NEXT: sub s6, a5, a4 +; RV32-NEXT: andi a4, a7, 1 +; RV32-NEXT: sw a4, 16(sp) # 4-byte Folded Spill +; RV32-NEXT: andi a5, a3, 1 +; RV32-NEXT: srl a3, t0, a1 +; RV32-NEXT: or a4, a3, s4 +; RV32-NEXT: addi a6, a6, -1 +; RV32-NEXT: sw a6, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: li s11, 0 +; RV32-NEXT: li s10, 0 +; RV32-NEXT: j .LBB3_45 +; RV32-NEXT: .LBB3_44: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: lw s0, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: and s0, a5, s0 +; RV32-NEXT: xor s8, t1, a7 +; RV32-NEXT: xor s9, a2, s0 +; RV32-NEXT: or s8, s9, s8 +; RV32-NEXT: li s9, 0 +; RV32-NEXT: li s8, 0 +; RV32-NEXT: sltu s4, a2, s0 +; RV32-NEXT: sub s0, a2, s0 +; RV32-NEXT: sub a7, t1, a7 +; RV32-NEXT: 
srli a2, s2, 31 +; RV32-NEXT: sub a0, a0, t0 +; RV32-NEXT: slli t0, t5, 1 +; RV32-NEXT: or t0, t0, a2 +; RV32-NEXT: srli a2, s1, 31 +; RV32-NEXT: slli s2, s2, 1 +; RV32-NEXT: or t1, s2, a2 +; RV32-NEXT: srli a2, t3, 31 +; RV32-NEXT: slli s1, s1, 1 +; RV32-NEXT: or s1, s1, a2 +; RV32-NEXT: slli t3, t3, 1 +; RV32-NEXT: lw a2, 52(sp) # 4-byte Folded Reload +; RV32-NEXT: or t3, a2, t3 +; RV32-NEXT: srli a2, t5, 31 +; RV32-NEXT: or s7, s7, a2 +; RV32-NEXT: sub a2, s0, ra +; RV32-NEXT: sltu s0, s0, ra +; RV32-NEXT: or t5, a1, t6 +; RV32-NEXT: sub a7, a7, s4 +; RV32-NEXT: or s2, t2, t4 +; RV32-NEXT: sub a0, a0, a6 +; RV32-NEXT: or a6, a1, t2 +; RV32-NEXT: or s4, t5, s2 +; RV32-NEXT: seqz t5, a1 +; RV32-NEXT: addi a1, a1, -1 +; RV32-NEXT: andi a5, a5, 1 +; RV32-NEXT: sw a5, 52(sp) # 4-byte Folded Spill +; RV32-NEXT: seqz a6, a6 +; RV32-NEXT: sub t2, t2, t5 +; RV32-NEXT: lw a5, 48(sp) # 4-byte Folded Reload +; RV32-NEXT: or s1, a5, s1 +; RV32-NEXT: lw a5, 44(sp) # 4-byte Folded Reload +; RV32-NEXT: or s2, a5, t1 +; RV32-NEXT: lw a5, 40(sp) # 4-byte Folded Reload +; RV32-NEXT: or t5, a5, t0 +; RV32-NEXT: andi a5, s7, 1 +; RV32-NEXT: sub ra, a7, s0 +; RV32-NEXT: snez a7, s4 +; RV32-NEXT: sltu t0, t6, a6 +; RV32-NEXT: sub t6, t6, a6 +; RV32-NEXT: add a7, s3, a7 +; RV32-NEXT: sub t4, t4, t0 +; RV32-NEXT: or a6, a1, t6 +; RV32-NEXT: addi a7, a7, 1 +; RV32-NEXT: or t0, t2, t4 +; RV32-NEXT: andi s3, a7, 1 +; RV32-NEXT: or a6, a6, t0 +; RV32-NEXT: or a6, a6, s3 +; RV32-NEXT: sub a4, a4, a3 +; RV32-NEXT: sw zero, 48(sp) # 4-byte Folded Spill +; RV32-NEXT: sw zero, 44(sp) # 4-byte Folded Spill +; RV32-NEXT: sw zero, 40(sp) # 4-byte Folded Spill +; RV32-NEXT: li s7, 0 +; RV32-NEXT: beqz a6, .LBB3_56 +; RV32-NEXT: .LBB3_45: # %udiv-do-while +; RV32-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32-NEXT: srli a3, a2, 31 +; RV32-NEXT: slli a6, ra, 1 +; RV32-NEXT: or t1, a6, a3 +; RV32-NEXT: srli a3, a0, 31 +; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: or a2, a2, a3 +; RV32-NEXT: beq s6, t1, .LBB3_47 +; RV32-NEXT: # %bb.46: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: sltu a3, s6, t1 +; RV32-NEXT: j .LBB3_48 +; RV32-NEXT: .LBB3_47: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: lw a3, 36(sp) # 4-byte Folded Reload +; RV32-NEXT: sltu a3, a3, a2 +; RV32-NEXT: .LBB3_48: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: srli a6, a4, 31 +; RV32-NEXT: slli a0, a0, 1 +; RV32-NEXT: slli a4, a4, 1 +; RV32-NEXT: or a0, a0, a6 +; RV32-NEXT: andi a5, a5, 1 +; RV32-NEXT: or a4, a4, a5 +; RV32-NEXT: beq s5, a0, .LBB3_50 +; RV32-NEXT: # %bb.49: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: sltu a5, s5, a0 +; RV32-NEXT: j .LBB3_51 +; RV32-NEXT: .LBB3_50: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: lw a5, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: sltu a5, a5, a4 +; RV32-NEXT: .LBB3_51: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: lw a6, 36(sp) # 4-byte Folded Reload +; RV32-NEXT: xor a6, a6, a2 +; RV32-NEXT: xor a7, s6, t1 +; RV32-NEXT: or a6, a6, a7 +; RV32-NEXT: beqz a6, .LBB3_53 +; RV32-NEXT: # %bb.52: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: mv a5, a3 +; RV32-NEXT: .LBB3_53: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: srli a3, ra, 31 +; RV32-NEXT: lw a6, 16(sp) # 4-byte Folded Reload +; RV32-NEXT: sub a3, a6, a3 +; RV32-NEXT: sub a3, a3, a5 +; RV32-NEXT: slli a3, a3, 31 +; RV32-NEXT: srai a5, a3, 31 +; RV32-NEXT: lw a3, 24(sp) # 
4-byte Folded Reload +; RV32-NEXT: and a7, a5, a3 +; RV32-NEXT: lw a3, 32(sp) # 4-byte Folded Reload +; RV32-NEXT: and a3, a5, a3 +; RV32-NEXT: lw a6, 20(sp) # 4-byte Folded Reload +; RV32-NEXT: and t0, a5, a6 +; RV32-NEXT: sltu a6, a4, a3 +; RV32-NEXT: mv ra, a6 +; RV32-NEXT: beq a0, t0, .LBB3_44 +; RV32-NEXT: # %bb.54: # %udiv-do-while +; RV32-NEXT: # in Loop: Header=BB3_45 Depth=1 +; RV32-NEXT: sltu ra, a0, t0 +; RV32-NEXT: j .LBB3_44 +; RV32-NEXT: .LBB3_55: +; RV32-NEXT: sw zero, 52(sp) # 4-byte Folded Spill +; RV32-NEXT: li s11, 0 +; RV32-NEXT: li s9, 0 +; RV32-NEXT: li s10, 0 +; RV32-NEXT: li s8, 0 +; RV32-NEXT: .LBB3_56: # %udiv-loop-exit +; RV32-NEXT: srli a0, s2, 31 +; RV32-NEXT: slli a1, t5, 1 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: srli a1, s1, 31 +; RV32-NEXT: slli s2, s2, 1 +; RV32-NEXT: or a2, s2, a1 +; RV32-NEXT: srli a3, t3, 31 +; RV32-NEXT: slli s1, s1, 1 +; RV32-NEXT: srli a4, t5, 31 +; RV32-NEXT: slli t3, t3, 1 +; RV32-NEXT: lw a1, 52(sp) # 4-byte Folded Reload +; RV32-NEXT: or a1, a1, t3 +; RV32-NEXT: or a3, s11, a3 +; RV32-NEXT: or a4, s8, a4 +; RV32-NEXT: or t5, a3, s1 +; RV32-NEXT: or t3, s9, a2 +; RV32-NEXT: or a5, s10, a0 +; RV32-NEXT: andi a2, a4, 1 +; RV32-NEXT: lw ra, 8(sp) # 4-byte Folded Reload +; RV32-NEXT: .LBB3_57: # %udiv-end +; RV32-NEXT: sw a1, 0(ra) +; RV32-NEXT: sw t5, 4(ra) +; RV32-NEXT: sw t3, 8(ra) +; RV32-NEXT: sw a5, 12(ra) +; RV32-NEXT: andi a2, a2, 1 +; RV32-NEXT: sb a2, 16(ra) +; RV32-NEXT: lw ra, 236(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 232(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s1, 228(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s2, 224(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s3, 220(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s4, 216(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s5, 212(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s6, 208(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s7, 204(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s8, 200(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s9, 196(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s10, 192(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s11, 188(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 240 +; RV32-NEXT: ret +; +; RV64-LABEL: udiv_i129: +; RV64: # %bb.0: # %_udiv-special-cases +; RV64-NEXT: ld a3, 0(a2) +; RV64-NEXT: ld a4, 8(a2) +; RV64-NEXT: ld t1, 16(a2) +; RV64-NEXT: lui a2, 349525 +; RV64-NEXT: lui a5, 209715 +; RV64-NEXT: lui a6, 61681 +; RV64-NEXT: addi t0, a2, 1365 +; RV64-NEXT: addi a7, a5, 819 +; RV64-NEXT: addi a6, a6, -241 +; RV64-NEXT: slli a2, t0, 32 +; RV64-NEXT: slli a5, a7, 32 +; RV64-NEXT: slli t2, a6, 32 +; RV64-NEXT: add t0, t0, a2 +; RV64-NEXT: add a7, a7, a5 +; RV64-NEXT: add a6, a6, t2 +; RV64-NEXT: srli a2, a4, 1 +; RV64-NEXT: slli a5, t1, 63 +; RV64-NEXT: slli t2, a4, 63 +; RV64-NEXT: or t3, a5, a2 +; RV64-NEXT: srli a2, a3, 1 +; RV64-NEXT: or t4, a2, t2 +; RV64-NEXT: bnez t3, .LBB3_2 +; RV64-NEXT: # %bb.1: # %_udiv-special-cases +; RV64-NEXT: srli a2, t4, 1 +; RV64-NEXT: or a2, t4, a2 +; RV64-NEXT: srli a5, a2, 2 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 4 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 8 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 16 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 32 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: not a2, a2 +; RV64-NEXT: srli a5, a2, 1 +; RV64-NEXT: and a5, a5, t0 +; RV64-NEXT: sub a2, a2, a5 +; RV64-NEXT: and a5, a2, a7 +; RV64-NEXT: srli a2, a2, 2 +; RV64-NEXT: and a2, a2, a7 +; RV64-NEXT: add a2, a5, a2 +; RV64-NEXT: srli a5, a2, 4 +; RV64-NEXT: add 
a2, a2, a5 +; RV64-NEXT: and a2, a2, a6 +; RV64-NEXT: slli a5, a2, 8 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: slli a5, a2, 16 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: slli a5, a2, 32 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: srli a2, a2, 56 +; RV64-NEXT: addi t2, a2, 64 +; RV64-NEXT: j .LBB3_3 +; RV64-NEXT: .LBB3_2: +; RV64-NEXT: srli a2, t3, 1 +; RV64-NEXT: or a2, t3, a2 +; RV64-NEXT: srli a5, a2, 2 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 4 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 8 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 16 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 32 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: not a2, a2 +; RV64-NEXT: srli a5, a2, 1 +; RV64-NEXT: and a5, a5, t0 +; RV64-NEXT: sub a2, a2, a5 +; RV64-NEXT: and a5, a2, a7 +; RV64-NEXT: srli a2, a2, 2 +; RV64-NEXT: and a2, a2, a7 +; RV64-NEXT: add a2, a5, a2 +; RV64-NEXT: srli a5, a2, 4 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: and a2, a2, a6 +; RV64-NEXT: slli a5, a2, 8 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: slli a5, a2, 16 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: slli a5, a2, 32 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: srli t2, a2, 56 +; RV64-NEXT: .LBB3_3: # %_udiv-special-cases +; RV64-NEXT: addi sp, sp, -192 +; RV64-NEXT: sd s0, 184(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s1, 176(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s2, 168(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s3, 160(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s4, 152(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s5, 144(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s6, 136(sp) # 8-byte Folded Spill +; RV64-NEXT: slli a2, a3, 63 +; RV64-NEXT: li t5, 128 +; RV64-NEXT: bnez a2, .LBB3_5 +; RV64-NEXT: # %bb.4: # %_udiv-special-cases +; RV64-NEXT: li s0, 128 +; RV64-NEXT: j .LBB3_6 +; RV64-NEXT: .LBB3_5: +; RV64-NEXT: srli a5, a2, 1 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 2 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 4 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 8 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 16 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: srli a5, a2, 32 +; RV64-NEXT: or a2, a2, a5 +; RV64-NEXT: not a2, a2 +; RV64-NEXT: srli a5, a2, 1 +; RV64-NEXT: and a5, a5, t0 +; RV64-NEXT: sub a2, a2, a5 +; RV64-NEXT: and a5, a2, a7 +; RV64-NEXT: srli a2, a2, 2 +; RV64-NEXT: and a2, a2, a7 +; RV64-NEXT: add a2, a5, a2 +; RV64-NEXT: srli a5, a2, 4 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: and a2, a2, a6 +; RV64-NEXT: slli a5, a2, 8 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: slli a5, a2, 16 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: slli a5, a2, 32 +; RV64-NEXT: add a2, a2, a5 +; RV64-NEXT: srli s0, a2, 56 +; RV64-NEXT: .LBB3_6: # %_udiv-special-cases +; RV64-NEXT: ld a5, 0(a1) +; RV64-NEXT: ld a2, 8(a1) +; RV64-NEXT: ld s2, 16(a1) +; RV64-NEXT: or a1, t4, t3 +; RV64-NEXT: addi s1, s0, 128 +; RV64-NEXT: bnez a1, .LBB3_8 +; RV64-NEXT: # %bb.7: # %_udiv-special-cases +; RV64-NEXT: mv t2, s1 +; RV64-NEXT: .LBB3_8: # %_udiv-special-cases +; RV64-NEXT: snez s3, a1 +; RV64-NEXT: srli a1, a2, 1 +; RV64-NEXT: slli t3, s2, 63 +; RV64-NEXT: slli t4, a2, 63 +; RV64-NEXT: or a1, t3, a1 +; RV64-NEXT: srli t3, a5, 1 +; RV64-NEXT: or t6, t3, t4 +; RV64-NEXT: bnez a1, .LBB3_10 +; RV64-NEXT: # %bb.9: # %_udiv-special-cases +; RV64-NEXT: srli t3, t6, 1 +; RV64-NEXT: or t3, t6, t3 +; RV64-NEXT: srli t4, t3, 2 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, t3, 4 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, t3, 8 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, 
t3, 16 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, t3, 32 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: not t3, t3 +; RV64-NEXT: srli t4, t3, 1 +; RV64-NEXT: and t4, t4, t0 +; RV64-NEXT: sub t3, t3, t4 +; RV64-NEXT: and t4, t3, a7 +; RV64-NEXT: srli t3, t3, 2 +; RV64-NEXT: and t3, t3, a7 +; RV64-NEXT: add t3, t4, t3 +; RV64-NEXT: srli t4, t3, 4 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: and t3, t3, a6 +; RV64-NEXT: slli t4, t3, 8 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: slli t4, t3, 16 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: slli t4, t3, 32 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: srli t3, t3, 56 +; RV64-NEXT: addi s4, t3, 64 +; RV64-NEXT: j .LBB3_11 +; RV64-NEXT: .LBB3_10: +; RV64-NEXT: srli t3, a1, 1 +; RV64-NEXT: or t3, a1, t3 +; RV64-NEXT: srli t4, t3, 2 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, t3, 4 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, t3, 8 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, t3, 16 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: srli t4, t3, 32 +; RV64-NEXT: or t3, t3, t4 +; RV64-NEXT: not t3, t3 +; RV64-NEXT: srli t4, t3, 1 +; RV64-NEXT: and t4, t4, t0 +; RV64-NEXT: sub t3, t3, t4 +; RV64-NEXT: and t4, t3, a7 +; RV64-NEXT: srli t3, t3, 2 +; RV64-NEXT: and t3, t3, a7 +; RV64-NEXT: add t3, t4, t3 +; RV64-NEXT: srli t4, t3, 4 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: and t3, t3, a6 +; RV64-NEXT: slli t4, t3, 8 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: slli t4, t3, 16 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: slli t4, t3, 32 +; RV64-NEXT: add t3, t3, t4 +; RV64-NEXT: srli s4, t3, 56 +; RV64-NEXT: .LBB3_11: # %_udiv-special-cases +; RV64-NEXT: andi t4, s2, 1 +; RV64-NEXT: andi t1, t1, 1 +; RV64-NEXT: or t3, a3, a4 +; RV64-NEXT: or s2, a5, a2 +; RV64-NEXT: sltu s0, s1, s0 +; RV64-NEXT: slli s1, a5, 63 +; RV64-NEXT: addi s3, s3, -1 +; RV64-NEXT: beqz s1, .LBB3_13 +; RV64-NEXT: # %bb.12: +; RV64-NEXT: srli t5, s1, 1 +; RV64-NEXT: or t5, s1, t5 +; RV64-NEXT: srli s1, t5, 2 +; RV64-NEXT: or t5, t5, s1 +; RV64-NEXT: srli s1, t5, 4 +; RV64-NEXT: or t5, t5, s1 +; RV64-NEXT: srli s1, t5, 8 +; RV64-NEXT: or t5, t5, s1 +; RV64-NEXT: srli s1, t5, 16 +; RV64-NEXT: or t5, t5, s1 +; RV64-NEXT: srli s1, t5, 32 +; RV64-NEXT: or t5, t5, s1 +; RV64-NEXT: not t5, t5 +; RV64-NEXT: srli s1, t5, 1 +; RV64-NEXT: and t0, s1, t0 +; RV64-NEXT: sub t0, t5, t0 +; RV64-NEXT: and t5, t0, a7 +; RV64-NEXT: srli t0, t0, 2 +; RV64-NEXT: and a7, t0, a7 +; RV64-NEXT: add a7, t5, a7 +; RV64-NEXT: srli t0, a7, 4 +; RV64-NEXT: add a7, a7, t0 +; RV64-NEXT: and a6, a7, a6 +; RV64-NEXT: slli a7, a6, 8 +; RV64-NEXT: add a6, a6, a7 +; RV64-NEXT: slli a7, a6, 16 +; RV64-NEXT: add a6, a6, a7 +; RV64-NEXT: slli a7, a6, 32 +; RV64-NEXT: add a6, a6, a7 +; RV64-NEXT: srli t5, a6, 56 +; RV64-NEXT: .LBB3_13: # %_udiv-special-cases +; RV64-NEXT: or t0, t3, t1 +; RV64-NEXT: or a6, s2, t4 +; RV64-NEXT: and a7, s3, s0 +; RV64-NEXT: or t6, t6, a1 +; RV64-NEXT: addi s0, t5, 128 +; RV64-NEXT: bnez t6, .LBB3_15 +; RV64-NEXT: # %bb.14: # %_udiv-special-cases +; RV64-NEXT: mv s4, s0 +; RV64-NEXT: .LBB3_15: # %_udiv-special-cases +; RV64-NEXT: seqz a1, t0 +; RV64-NEXT: sltu t0, s0, t5 +; RV64-NEXT: snez t5, t6 +; RV64-NEXT: addi t5, t5, -1 +; RV64-NEXT: and t0, t5, t0 +; RV64-NEXT: sltu t5, t2, s4 +; RV64-NEXT: seqz a6, a6 +; RV64-NEXT: mv t6, t5 +; RV64-NEXT: beq a7, t0, .LBB3_17 +; RV64-NEXT: # %bb.16: # %_udiv-special-cases +; RV64-NEXT: sltu t6, a7, t0 +; RV64-NEXT: .LBB3_17: # %_udiv-special-cases +; RV64-NEXT: or a1, a1, a6 +; RV64-NEXT: andi a6, t6, 1 +; RV64-NEXT: sub a7, a7, t0 +; RV64-NEXT: sub 
t5, a7, t5 +; RV64-NEXT: sub a7, t2, s4 +; RV64-NEXT: beqz a6, .LBB3_19 +; RV64-NEXT: # %bb.18: # %_udiv-special-cases +; RV64-NEXT: mv t0, a6 +; RV64-NEXT: j .LBB3_20 +; RV64-NEXT: .LBB3_19: +; RV64-NEXT: sltiu t0, a7, 129 +; RV64-NEXT: xori t0, t0, 1 +; RV64-NEXT: snez t2, t5 +; RV64-NEXT: or t0, t0, t2 +; RV64-NEXT: .LBB3_20: # %_udiv-special-cases +; RV64-NEXT: or t6, a1, t0 +; RV64-NEXT: addi a1, t6, -1 +; RV64-NEXT: and t2, t4, a1 +; RV64-NEXT: and t0, a1, a2 +; RV64-NEXT: and a1, a1, a5 +; RV64-NEXT: bnez t6, .LBB3_30 +; RV64-NEXT: # %bb.21: # %_udiv-special-cases +; RV64-NEXT: xori t6, a7, 128 +; RV64-NEXT: or t6, t6, a6 +; RV64-NEXT: or t6, t6, t5 +; RV64-NEXT: beqz t6, .LBB3_30 +; RV64-NEXT: # %bb.22: # %udiv-bb1 +; RV64-NEXT: addi a1, a7, 1 +; RV64-NEXT: sd zero, 64(sp) +; RV64-NEXT: sd zero, 72(sp) +; RV64-NEXT: sd zero, 80(sp) +; RV64-NEXT: sd zero, 88(sp) +; RV64-NEXT: sd a5, 96(sp) +; RV64-NEXT: sd a2, 104(sp) +; RV64-NEXT: sd t4, 112(sp) +; RV64-NEXT: li t0, 128 +; RV64-NEXT: addi t2, sp, 96 +; RV64-NEXT: neg s1, a7 +; RV64-NEXT: seqz t6, a1 +; RV64-NEXT: sub a7, t0, a7 +; RV64-NEXT: add t5, t5, t6 +; RV64-NEXT: andi t0, a7, 63 +; RV64-NEXT: srli a7, a7, 3 +; RV64-NEXT: or t6, a1, t5 +; RV64-NEXT: xori s2, t0, 63 +; RV64-NEXT: andi a7, a7, 24 +; RV64-NEXT: seqz t0, t6 +; RV64-NEXT: sub s3, t2, a7 +; RV64-NEXT: add a6, a6, t0 +; RV64-NEXT: ld t2, 0(s3) +; RV64-NEXT: ld s4, 8(s3) +; RV64-NEXT: andi a7, a6, 1 +; RV64-NEXT: or t6, t6, a7 +; RV64-NEXT: srli a6, t2, 1 +; RV64-NEXT: sll t0, s4, s1 +; RV64-NEXT: srl a6, a6, s2 +; RV64-NEXT: or t0, t0, a6 +; RV64-NEXT: sll a6, t2, s1 +; RV64-NEXT: li t2, 0 +; RV64-NEXT: beqz t6, .LBB3_28 +; RV64-NEXT: # %bb.23: # %udiv-preheader +; RV64-NEXT: li t6, 0 +; RV64-NEXT: li s0, 0 +; RV64-NEXT: srli s4, s4, 1 +; RV64-NEXT: ld s3, 16(s3) +; RV64-NEXT: sd zero, 32(sp) +; RV64-NEXT: sd zero, 40(sp) +; RV64-NEXT: sd zero, 48(sp) +; RV64-NEXT: sd zero, 56(sp) +; RV64-NEXT: sd a5, 0(sp) +; RV64-NEXT: sd a2, 8(sp) +; RV64-NEXT: sd t4, 16(sp) +; RV64-NEXT: sd zero, 24(sp) +; RV64-NEXT: srli a2, a1, 3 +; RV64-NEXT: srl a5, s4, s2 +; RV64-NEXT: mv t4, sp +; RV64-NEXT: snez t3, t3 +; RV64-NEXT: andi a2, a2, 24 +; RV64-NEXT: add t1, t1, t3 +; RV64-NEXT: add a2, t4, a2 +; RV64-NEXT: ld t3, 0(a2) +; RV64-NEXT: ld t4, 8(a2) +; RV64-NEXT: ld a2, 16(a2) +; RV64-NEXT: sll s1, s3, s1 +; RV64-NEXT: andi s2, a1, 63 +; RV64-NEXT: xori s2, s2, 63 +; RV64-NEXT: or s3, s1, a5 +; RV64-NEXT: slli a2, a2, 1 +; RV64-NEXT: slli a5, t4, 1 +; RV64-NEXT: sll a2, a2, s2 +; RV64-NEXT: sll s2, a5, s2 +; RV64-NEXT: srl s1, t4, a1 +; RV64-NEXT: or s1, s1, a2 +; RV64-NEXT: seqz a2, a3 +; RV64-NEXT: sub a2, a4, a2 +; RV64-NEXT: addi a5, t1, 1 +; RV64-NEXT: andi a5, a5, 1 +; RV64-NEXT: andi s3, s3, 1 +; RV64-NEXT: srl t1, t3, a1 +; RV64-NEXT: or s2, t1, s2 +; RV64-NEXT: addi t1, a3, -1 +; RV64-NEXT: j .LBB3_26 +; RV64-NEXT: .LBB3_24: # %udiv-do-while +; RV64-NEXT: # in Loop: Header=BB3_26 Depth=1 +; RV64-NEXT: sltu t3, a2, s4 +; RV64-NEXT: .LBB3_25: # %udiv-do-while +; RV64-NEXT: # in Loop: Header=BB3_26 Depth=1 +; RV64-NEXT: srli s1, s1, 63 +; RV64-NEXT: sub t4, a5, s1 +; RV64-NEXT: sub t3, t4, t3 +; RV64-NEXT: slli t3, t3, 63 +; RV64-NEXT: srai s1, t3, 63 +; RV64-NEXT: and s3, s1, a4 +; RV64-NEXT: li t3, 0 +; RV64-NEXT: li t4, 0 +; RV64-NEXT: srli s5, a6, 63 +; RV64-NEXT: sub s4, s4, s3 +; RV64-NEXT: slli s3, t0, 1 +; RV64-NEXT: or s3, s3, s5 +; RV64-NEXT: srli t0, t0, 63 +; RV64-NEXT: slli a6, a6, 1 +; RV64-NEXT: or a6, t2, a6 +; RV64-NEXT: seqz t2, a1 +; RV64-NEXT: or s0, s0, 
t0 +; RV64-NEXT: or s5, a1, t5 +; RV64-NEXT: sub t5, t5, t2 +; RV64-NEXT: and s6, s1, a3 +; RV64-NEXT: addi a1, a1, -1 +; RV64-NEXT: andi t2, s1, 1 +; RV64-NEXT: or t0, t6, s3 +; RV64-NEXT: sltu t6, s2, s6 +; RV64-NEXT: snez s5, s5 +; RV64-NEXT: andi s3, s0, 1 +; RV64-NEXT: sub s1, s4, t6 +; RV64-NEXT: add a7, a7, s5 +; RV64-NEXT: addi a7, a7, 1 +; RV64-NEXT: andi a7, a7, 1 +; RV64-NEXT: or t6, a1, t5 +; RV64-NEXT: or s4, t6, a7 +; RV64-NEXT: sub s2, s2, s6 +; RV64-NEXT: li t6, 0 +; RV64-NEXT: li s0, 0 +; RV64-NEXT: beqz s4, .LBB3_29 +; RV64-NEXT: .LBB3_26: # %udiv-do-while +; RV64-NEXT: # =>This Inner Loop Header: Depth=1 +; RV64-NEXT: srli t3, s2, 63 +; RV64-NEXT: slli t4, s1, 1 +; RV64-NEXT: slli s2, s2, 1 +; RV64-NEXT: or s4, t4, t3 +; RV64-NEXT: andi t3, s3, 1 +; RV64-NEXT: or s2, s2, t3 +; RV64-NEXT: bne a2, s4, .LBB3_24 +; RV64-NEXT: # %bb.27: # in Loop: Header=BB3_26 Depth=1 +; RV64-NEXT: sltu t3, t1, s2 +; RV64-NEXT: j .LBB3_25 +; RV64-NEXT: .LBB3_28: +; RV64-NEXT: li t3, 0 +; RV64-NEXT: li t4, 0 +; RV64-NEXT: .LBB3_29: # %udiv-loop-exit +; RV64-NEXT: srli a2, a6, 63 +; RV64-NEXT: slli a3, t0, 1 +; RV64-NEXT: srli a4, t0, 63 +; RV64-NEXT: slli a6, a6, 1 +; RV64-NEXT: or a1, t2, a6 +; RV64-NEXT: or a2, t3, a2 +; RV64-NEXT: or a4, t4, a4 +; RV64-NEXT: or t0, a2, a3 +; RV64-NEXT: andi t2, a4, 1 +; RV64-NEXT: .LBB3_30: # %udiv-end +; RV64-NEXT: andi a2, t2, 1 +; RV64-NEXT: sd a1, 0(a0) +; RV64-NEXT: sd t0, 8(a0) +; RV64-NEXT: sb a2, 16(a0) +; RV64-NEXT: ld s0, 184(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s1, 176(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s2, 168(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s3, 160(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s4, 152(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s5, 144(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s6, 136(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 192 +; RV64-NEXT: ret %res = udiv i129 %x, %y ret i129 %res } diff --git a/llvm/test/CodeGen/RISCV/min-max.ll b/llvm/test/CodeGen/RISCV/min-max.ll index acde8ad..e7f6899 100644 --- a/llvm/test/CodeGen/RISCV/min-max.ll +++ b/llvm/test/CodeGen/RISCV/min-max.ll @@ -5,6 +5,12 @@ ; RUN: FileCheck %s --check-prefixes=ZBB,RV32ZBB ; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | \ ; RUN: FileCheck %s --check-prefixes=ZBB,RV64ZBB +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s | \ +; RUN: FileCheck %s --check-prefixes=XQCI +; RUN: llc < %s -mtriple=riscv32 -mattr=+short-forward-branch-opt | \ +; RUN: FileCheck %s --check-prefixes=RV32I-SFB +; RUN: llc < %s -mtriple=riscv64 -mattr=+short-forward-branch-opt | \ +; RUN: FileCheck %s --check-prefixes=RV64I-SFB ; Basic tests. 
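A note for readers of the udiv_i129 checks above: the long srli/or, not, and masked-add chains that recur in both the RV32 and RV64 output are the standard branch-free count-leading-zeros expansion (smear the highest set bit rightward, invert, then popcount). The lui 349525/209715/61681 plus addi pairs materialize the popcount masks 0x55555555, 0x33333333, and 0x0f0f0f0f. A minimal C sketch of the 32-bit form, written here only to decode the pattern (clz32 is an illustrative helper name, not part of the tests):

    unsigned clz32(unsigned x) {
        /* Smear the highest set bit into every lower position. */
        x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
        x |= x >> 8;  x |= x >> 16;
        /* Invert: the leading zeros become the only set bits. */
        x = ~x;
        /* Parallel popcount; masks match the lui/addi constants. */
        x -= (x >> 1) & 0x55555555u;
        x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
        x = (x + (x >> 4)) & 0x0f0f0f0fu;
        /* Accumulate byte sums into the top byte, as the asm does
           with the slli 8 / slli 16 adds, then extract via srli 24. */
        x += x << 8;
        x += x << 16;
        return x >> 24;
    }

The RV64 variant in the checks is the same shape widened to 64 bits: one extra >> 32 smear step, masks doubled with slli 32/add, and a final srli 56.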
@@ -23,6 +29,27 @@ define signext i8 @smax_i8(i8 signext %a, i8 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: max a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smax_i8: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvge a0, a1, a0, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_i8: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: blt a1, a0, .LBB0_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB0_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_i8: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a1, a0, .LBB0_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB0_2: +; RV64I-SFB-NEXT: ret %c = call i8 @llvm.smax.i8(i8 %a, i8 %b) ret i8 %c } @@ -42,6 +69,27 @@ define signext i16 @smax_i16(i16 signext %a, i16 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: max a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smax_i16: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvge a0, a1, a0, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_i16: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: blt a1, a0, .LBB1_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB1_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_i16: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a1, a0, .LBB1_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB1_2: +; RV64I-SFB-NEXT: ret %c = call i16 @llvm.smax.i16(i16 %a, i16 %b) ret i16 %c } @@ -61,6 +109,27 @@ define signext i32 @smax_i32(i32 signext %a, i32 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: max a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smax_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvge a0, a1, a0, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: blt a1, a0, .LBB2_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB2_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a1, a0, .LBB2_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB2_2: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smax.i32(i32 %a, i32 %b) ret i32 %c } @@ -112,6 +181,41 @@ define i64 @smax_i64(i64 %a, i64 %b) { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: max a0, a0, a1 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: smax_i64: +; XQCI: # %bb.0: +; XQCI-NEXT: sltu a4, a2, a0 +; XQCI-NEXT: slt a5, a3, a1 +; XQCI-NEXT: qc.mveq a5, a1, a3, a4 +; XQCI-NEXT: qc.mveqi a0, a5, 0, a2 +; XQCI-NEXT: qc.mveqi a1, a5, 0, a3 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_i64: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: sltu a4, a2, a0 +; RV32I-SFB-NEXT: slt a5, a3, a1 +; RV32I-SFB-NEXT: bne a1, a3, .LBB3_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a5, a4 +; RV32I-SFB-NEXT: .LBB3_2: +; RV32I-SFB-NEXT: bnez a5, .LBB3_4 +; RV32I-SFB-NEXT: # %bb.3: +; RV32I-SFB-NEXT: mv a0, a2 +; RV32I-SFB-NEXT: .LBB3_4: +; RV32I-SFB-NEXT: bnez a5, .LBB3_6 +; RV32I-SFB-NEXT: # %bb.5: +; RV32I-SFB-NEXT: mv a1, a3 +; RV32I-SFB-NEXT: .LBB3_6: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_i64: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a1, a0, .LBB3_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB3_2: +; RV64I-SFB-NEXT: ret %c = call i64 @llvm.smax.i64(i64 %a, i64 %b) ret i64 %c } @@ -131,6 +235,27 @@ define signext i8 @smin_i8(i8 signext %a, i8 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: min a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smin_i8: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvge a0, a0, a1, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_i8: +; RV32I-SFB: # %bb.0: +; 
RV32I-SFB-NEXT: blt a0, a1, .LBB4_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB4_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_i8: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a0, a1, .LBB4_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB4_2: +; RV64I-SFB-NEXT: ret %c = call i8 @llvm.smin.i8(i8 %a, i8 %b) ret i8 %c } @@ -150,6 +275,27 @@ define signext i16 @smin_i16(i16 signext %a, i16 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: min a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smin_i16: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvge a0, a0, a1, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_i16: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: blt a0, a1, .LBB5_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB5_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_i16: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a0, a1, .LBB5_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB5_2: +; RV64I-SFB-NEXT: ret %c = call i16 @llvm.smin.i16(i16 %a, i16 %b) ret i16 %c } @@ -169,6 +315,27 @@ define signext i32 @smin_i32(i32 signext %a, i32 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: min a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smin_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvge a0, a0, a1, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: blt a0, a1, .LBB6_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB6_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a0, a1, .LBB6_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB6_2: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smin.i32(i32 %a, i32 %b) ret i32 %c } @@ -220,6 +387,41 @@ define i64 @smin_i64(i64 %a, i64 %b) { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: min a0, a0, a1 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: smin_i64: +; XQCI: # %bb.0: +; XQCI-NEXT: sltu a4, a0, a2 +; XQCI-NEXT: slt a5, a1, a3 +; XQCI-NEXT: qc.mveq a5, a1, a3, a4 +; XQCI-NEXT: qc.mveqi a0, a5, 0, a2 +; XQCI-NEXT: qc.mveqi a1, a5, 0, a3 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_i64: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: sltu a4, a0, a2 +; RV32I-SFB-NEXT: slt a5, a1, a3 +; RV32I-SFB-NEXT: bne a1, a3, .LBB7_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a5, a4 +; RV32I-SFB-NEXT: .LBB7_2: +; RV32I-SFB-NEXT: bnez a5, .LBB7_4 +; RV32I-SFB-NEXT: # %bb.3: +; RV32I-SFB-NEXT: mv a0, a2 +; RV32I-SFB-NEXT: .LBB7_4: +; RV32I-SFB-NEXT: bnez a5, .LBB7_6 +; RV32I-SFB-NEXT: # %bb.5: +; RV32I-SFB-NEXT: mv a1, a3 +; RV32I-SFB-NEXT: .LBB7_6: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_i64: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: blt a0, a1, .LBB7_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB7_2: +; RV64I-SFB-NEXT: ret %c = call i64 @llvm.smin.i64(i64 %a, i64 %b) ret i64 %c } @@ -239,6 +441,27 @@ define i8 @umax_i8(i8 zeroext %a, i8 zeroext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: maxu a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: umax_i8: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvgeu a0, a1, a0, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umax_i8: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: bltu a1, a0, .LBB8_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB8_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umax_i8: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a1, a0, .LBB8_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; 
RV64I-SFB-NEXT: .LBB8_2: +; RV64I-SFB-NEXT: ret %c = call i8 @llvm.umax.i8(i8 %a, i8 %b) ret i8 %c } @@ -258,6 +481,27 @@ define i16 @umax_i16(i16 zeroext %a, i16 zeroext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: maxu a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: umax_i16: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvgeu a0, a1, a0, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umax_i16: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: bltu a1, a0, .LBB9_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB9_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umax_i16: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a1, a0, .LBB9_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB9_2: +; RV64I-SFB-NEXT: ret %c = call i16 @llvm.umax.i16(i16 %a, i16 %b) ret i16 %c } @@ -277,6 +521,27 @@ define signext i32 @umax_i32(i32 signext %a, i32 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: maxu a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: umax_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvgeu a0, a1, a0, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umax_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: bltu a1, a0, .LBB10_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB10_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umax_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a1, a0, .LBB10_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB10_2: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.umax.i32(i32 %a, i32 %b) ret i32 %c } @@ -328,6 +593,41 @@ define i64 @umax_i64(i64 %a, i64 %b) { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: maxu a0, a0, a1 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: umax_i64: +; XQCI: # %bb.0: +; XQCI-NEXT: sltu a4, a2, a0 +; XQCI-NEXT: sltu a5, a3, a1 +; XQCI-NEXT: qc.mveq a5, a1, a3, a4 +; XQCI-NEXT: qc.mveqi a0, a5, 0, a2 +; XQCI-NEXT: qc.mveqi a1, a5, 0, a3 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umax_i64: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: sltu a4, a2, a0 +; RV32I-SFB-NEXT: sltu a5, a3, a1 +; RV32I-SFB-NEXT: bne a1, a3, .LBB11_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a5, a4 +; RV32I-SFB-NEXT: .LBB11_2: +; RV32I-SFB-NEXT: bnez a5, .LBB11_4 +; RV32I-SFB-NEXT: # %bb.3: +; RV32I-SFB-NEXT: mv a0, a2 +; RV32I-SFB-NEXT: .LBB11_4: +; RV32I-SFB-NEXT: bnez a5, .LBB11_6 +; RV32I-SFB-NEXT: # %bb.5: +; RV32I-SFB-NEXT: mv a1, a3 +; RV32I-SFB-NEXT: .LBB11_6: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umax_i64: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a1, a0, .LBB11_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB11_2: +; RV64I-SFB-NEXT: ret %c = call i64 @llvm.umax.i64(i64 %a, i64 %b) ret i64 %c } @@ -347,6 +647,27 @@ define zeroext i8 @umin_i8(i8 zeroext %a, i8 zeroext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: minu a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: umin_i8: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvgeu a0, a0, a1, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umin_i8: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: bltu a0, a1, .LBB12_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB12_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umin_i8: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a0, a1, .LBB12_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB12_2: +; RV64I-SFB-NEXT: ret %c = call i8 @llvm.umin.i8(i8 %a, i8 %b) ret i8 %c } @@ -366,6 +687,27 @@ define zeroext i16 @umin_i16(i16 zeroext %a, i16 zeroext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: minu a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: umin_i16: +; 
XQCI: # %bb.0: +; XQCI-NEXT: qc.mvgeu a0, a0, a1, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umin_i16: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: bltu a0, a1, .LBB13_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB13_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umin_i16: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a0, a1, .LBB13_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB13_2: +; RV64I-SFB-NEXT: ret %c = call i16 @llvm.umin.i16(i16 %a, i16 %b) ret i16 %c } @@ -385,6 +727,27 @@ define signext i32 @umin_i32(i32 signext %a, i32 signext %b) { ; ZBB: # %bb.0: ; ZBB-NEXT: minu a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: umin_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.mvgeu a0, a0, a1, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umin_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: bltu a0, a1, .LBB14_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB14_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umin_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a0, a1, .LBB14_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB14_2: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.umin.i32(i32 %a, i32 %b) ret i32 %c } @@ -436,6 +799,41 @@ define i64 @umin_i64(i64 %a, i64 %b) { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: minu a0, a0, a1 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: umin_i64: +; XQCI: # %bb.0: +; XQCI-NEXT: sltu a4, a0, a2 +; XQCI-NEXT: sltu a5, a1, a3 +; XQCI-NEXT: qc.mveq a5, a1, a3, a4 +; XQCI-NEXT: qc.mveqi a0, a5, 0, a2 +; XQCI-NEXT: qc.mveqi a1, a5, 0, a3 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umin_i64: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: sltu a4, a0, a2 +; RV32I-SFB-NEXT: sltu a5, a1, a3 +; RV32I-SFB-NEXT: bne a1, a3, .LBB15_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a5, a4 +; RV32I-SFB-NEXT: .LBB15_2: +; RV32I-SFB-NEXT: bnez a5, .LBB15_4 +; RV32I-SFB-NEXT: # %bb.3: +; RV32I-SFB-NEXT: mv a0, a2 +; RV32I-SFB-NEXT: .LBB15_4: +; RV32I-SFB-NEXT: bnez a5, .LBB15_6 +; RV32I-SFB-NEXT: # %bb.5: +; RV32I-SFB-NEXT: mv a1, a3 +; RV32I-SFB-NEXT: .LBB15_6: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umin_i64: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: bltu a0, a1, .LBB15_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB15_2: +; RV64I-SFB-NEXT: ret %c = call i64 @llvm.umin.i64(i64 %a, i64 %b) ret i64 %c } @@ -450,6 +848,18 @@ define signext i32 @smin_same_op_i32(i32 signext %a) { ; ZBB-LABEL: smin_same_op_i32: ; ZBB: # %bb.0: ; ZBB-NEXT: ret +; +; XQCI-LABEL: smin_same_op_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_same_op_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_same_op_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smin.i32(i32 %a, i32 %a) ret i32 %c } @@ -462,6 +872,18 @@ define signext i32 @smax_same_op_i32(i32 signext %a) { ; ZBB-LABEL: smax_same_op_i32: ; ZBB: # %bb.0: ; ZBB-NEXT: ret +; +; XQCI-LABEL: smax_same_op_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_same_op_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_same_op_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smax.i32(i32 %a, i32 %a) ret i32 %c } @@ -474,6 +896,18 @@ define signext i32 @umin_same_op_i32(i32 signext %a) { ; ZBB-LABEL: umin_same_op_i32: ; ZBB: # %bb.0: ; ZBB-NEXT: ret +; +; XQCI-LABEL: umin_same_op_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umin_same_op_i32: +; 
RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umin_same_op_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.umin.i32(i32 %a, i32 %a) ret i32 %c } @@ -486,6 +920,18 @@ define signext i32 @umax_same_op_i32(i32 signext %a) { ; ZBB-LABEL: umax_same_op_i32: ; ZBB: # %bb.0: ; ZBB-NEXT: ret +; +; XQCI-LABEL: umax_same_op_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umax_same_op_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umax_same_op_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.umax.i32(i32 %a, i32 %a) ret i32 %c } @@ -510,6 +956,19 @@ define signext i32 @smin_undef_i32() { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: li a0, 0 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: smin_undef_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_undef_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_undef_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a0, 0 +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smin.i32(i32 undef, i32 undef) ret i32 %c } @@ -532,6 +991,19 @@ define signext i32 @smax_undef_i32() { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: li a0, 0 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: smax_undef_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_undef_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_undef_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a0, 0 +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smax.i32(i32 undef, i32 undef) ret i32 %c } @@ -554,6 +1026,19 @@ define signext i32 @umin_undef_i32() { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: li a0, 0 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: umin_undef_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umin_undef_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umin_undef_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a0, 0 +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.umin.i32(i32 undef, i32 undef) ret i32 %c } @@ -576,6 +1061,19 @@ define signext i32 @umax_undef_i32() { ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: li a0, 0 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: umax_undef_i32: +; XQCI: # %bb.0: +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umax_undef_i32: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umax_undef_i32: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a0, 0 +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.umax.i32(i32 undef, i32 undef) ret i32 %c } @@ -595,6 +1093,29 @@ define signext i32 @smax_i32_pos_constant(i32 signext %a) { ; ZBB-NEXT: li a1, 10 ; ZBB-NEXT: max a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smax_i32_pos_constant: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.lilti a0, a0, 11, 10 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_i32_pos_constant: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: li a1, 10 +; RV32I-SFB-NEXT: blt a1, a0, .LBB24_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB24_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_i32_pos_constant: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a1, 10 +; RV64I-SFB-NEXT: blt a1, a0, .LBB24_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB24_2: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smax.i32(i32 %a, i32 10) ret i32 %c } @@ -616,6 +1137,33 @@ define signext i32 @smax_i32_pos_constant_trailing_zeros(i32 signext %a) { ; ZBB-NEXT: li a1, 16 ; ZBB-NEXT: max a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smax_i32_pos_constant_trailing_zeros: +; XQCI: # %bb.0: +; XQCI-NEXT: andi a1, a0, -8 +; XQCI-NEXT: 
li a0, 16 +; XQCI-NEXT: qc.mvlt a0, a0, a1, a1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smax_i32_pos_constant_trailing_zeros: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: andi a1, a0, -8 +; RV32I-SFB-NEXT: li a0, 16 +; RV32I-SFB-NEXT: bge a0, a1, .LBB25_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB25_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smax_i32_pos_constant_trailing_zeros: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: andi a1, a0, -8 +; RV64I-SFB-NEXT: li a0, 16 +; RV64I-SFB-NEXT: bge a0, a1, .LBB25_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB25_2: +; RV64I-SFB-NEXT: ret %b = and i32 %a, -8 %c = call i32 @llvm.smax.i32(i32 %b, i32 16) %d = and i32 %c, -4 @@ -635,6 +1183,29 @@ define signext i32 @smin_i32_negone(i32 signext %a) { ; ZBB-NEXT: li a1, -1 ; ZBB-NEXT: min a0, a0, a1 ; ZBB-NEXT: ret +; +; XQCI-LABEL: smin_i32_negone: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.ligei a0, a0, 0, -1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_i32_negone: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: li a1, -1 +; RV32I-SFB-NEXT: bltz a0, .LBB26_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a1 +; RV32I-SFB-NEXT: .LBB26_2: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_i32_negone: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a1, -1 +; RV64I-SFB-NEXT: bltz a0, .LBB26_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB26_2: +; RV64I-SFB-NEXT: ret %c = call i32 @llvm.smin.i32(i32 %a, i32 -1) ret i32 %c } @@ -672,6 +1243,34 @@ define i64 @smin_i64_negone(i64 %a) { ; RV64ZBB-NEXT: li a1, -1 ; RV64ZBB-NEXT: min a0, a0, a1 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: smin_i64_negone: +; XQCI: # %bb.0: +; XQCI-NEXT: qc.ligei a0, a1, 0, -1 +; XQCI-NEXT: qc.ligei a1, a1, 0, -1 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: smin_i64_negone: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: li a2, -1 +; RV32I-SFB-NEXT: bltz a1, .LBB27_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a0, a2 +; RV32I-SFB-NEXT: .LBB27_2: +; RV32I-SFB-NEXT: bltz a1, .LBB27_4 +; RV32I-SFB-NEXT: # %bb.3: +; RV32I-SFB-NEXT: mv a1, a2 +; RV32I-SFB-NEXT: .LBB27_4: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: smin_i64_negone: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a1, -1 +; RV64I-SFB-NEXT: bltz a0, .LBB27_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB27_2: +; RV64I-SFB-NEXT: ret %c = call i64 @llvm.smin.i64(i64 %a, i64 -1) ret i64 %c } @@ -720,6 +1319,41 @@ define i64 @umax_i64_one(i64 %a, i64 %b) { ; RV64ZBB-NEXT: li a1, 1 ; RV64ZBB-NEXT: maxu a0, a0, a1 ; RV64ZBB-NEXT: ret +; +; XQCI-LABEL: umax_i64_one: +; XQCI: # %bb.0: +; XQCI-NEXT: mv a2, a1 +; XQCI-NEXT: qc.selectinei a2, 0, a0, 1 +; XQCI-NEXT: qc.liltui a0, a0, 2, 1 +; XQCI-NEXT: qc.mvnei a0, a1, 0, a2 +; XQCI-NEXT: ret +; +; RV32I-SFB-LABEL: umax_i64_one: +; RV32I-SFB: # %bb.0: +; RV32I-SFB-NEXT: li a2, 1 +; RV32I-SFB-NEXT: li a3, 1 +; RV32I-SFB-NEXT: beqz a1, .LBB28_2 +; RV32I-SFB-NEXT: # %bb.1: +; RV32I-SFB-NEXT: mv a3, a0 +; RV32I-SFB-NEXT: .LBB28_2: +; RV32I-SFB-NEXT: bnez a0, .LBB28_4 +; RV32I-SFB-NEXT: # %bb.3: +; RV32I-SFB-NEXT: mv a0, a2 +; RV32I-SFB-NEXT: .LBB28_4: +; RV32I-SFB-NEXT: beqz a1, .LBB28_6 +; RV32I-SFB-NEXT: # %bb.5: +; RV32I-SFB-NEXT: mv a0, a3 +; RV32I-SFB-NEXT: .LBB28_6: +; RV32I-SFB-NEXT: ret +; +; RV64I-SFB-LABEL: umax_i64_one: +; RV64I-SFB: # %bb.0: +; RV64I-SFB-NEXT: li a1, 1 +; RV64I-SFB-NEXT: bnez a0, .LBB28_2 +; RV64I-SFB-NEXT: # %bb.1: +; RV64I-SFB-NEXT: mv a0, a1 +; RV64I-SFB-NEXT: .LBB28_2: +; RV64I-SFB-NEXT: 
ret %c = call i64 @llvm.umax.i64(i64 %a, i64 1) ret i64 %c } diff --git a/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll new file mode 100644 index 0000000..c19e93d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll @@ -0,0 +1,76 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -O1 -mtriple=riscv64 -mattr=+v < %s | FileCheck %s + +define i32 @pr134424(i64 %input_value, i32 %base_value, i1 %cond_flag1, i1 %cond_flag2, i1 %cond_flag3) { +; CHECK-LABEL: pr134424: +; CHECK: # %bb.0: # %for.body.us.preheader.i +; CHECK-NEXT: andi a3, a3, 1 +; CHECK-NEXT: andi a5, a2, 1 +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; CHECK-NEXT: vmv.v.i v0, 14 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: bnez a5, .LBB0_2 +; CHECK-NEXT: # %bb.1: # %for.body.us.preheader.i +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: .LBB0_2: # %for.body.us.preheader.i +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: andi a4, a4, 1 +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: bnez a3, .LBB0_4 +; CHECK-NEXT: # %bb.3: # %for.body.us.preheader.i +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: .LBB0_4: # %for.body.us.preheader.i +; CHECK-NEXT: vmsle.vi v0, v8, 0 +; CHECK-NEXT: sext.w a2, a2 +; CHECK-NEXT: bnez a4, .LBB0_6 +; CHECK-NEXT: # %bb.5: # %for.body.us.preheader.i +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: .LBB0_6: # %for.body.us.preheader.i +; CHECK-NEXT: sext.w a0, a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vredmin.vs v8, v8, v8 +; CHECK-NEXT: vmv.x.s a3, v8 +; CHECK-NEXT: sext.w a1, a1 +; CHECK-NEXT: bge a3, a2, .LBB0_11 +; CHECK-NEXT: # %bb.7: # %for.body.us.preheader.i +; CHECK-NEXT: bge a0, a1, .LBB0_12 +; CHECK-NEXT: .LBB0_8: # %for.body.us.preheader.i +; CHECK-NEXT: blt a3, a0, .LBB0_10 +; CHECK-NEXT: .LBB0_9: # %for.body.us.preheader.i +; CHECK-NEXT: mv a3, a0 +; CHECK-NEXT: .LBB0_10: # %for.body.us.preheader.i +; CHECK-NEXT: sw a3, 0(zero) +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB0_11: # %for.body.us.preheader.i +; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: blt a0, a1, .LBB0_8 +; CHECK-NEXT: .LBB0_12: # %for.body.us.preheader.i +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: bge a3, a0, .LBB0_9 +; CHECK-NEXT: j .LBB0_10 +for.body.us.preheader.i: + %partial_vector = insertelement <4 x i64> zeroinitializer, i64 %input_value, i64 1 + %comparison_vector = shufflevector <4 x i64> %partial_vector, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 1, i32 1> + %comparison_result = icmp sle <4 x i64> %comparison_vector, zeroinitializer + %selected_value1 = select i1 %cond_flag1, i32 %base_value, i32 1 + %selected_value2 = select i1 %cond_flag2, i32 %base_value, i32 1 + %selected_value3 = select i1 %cond_flag3, i32 %base_value, i32 1 + %bool_to_int = zext <4 x i1> %comparison_result to <4 x i32> + %extended_vector = shufflevector <4 x i32> %bool_to_int, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison> + %vector_min = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %extended_vector) + %min1 = call i32 @llvm.smin.i32(i32 %vector_min, 
i32 %selected_value1) + %min2 = call i32 @llvm.smin.i32(i32 %selected_value2, i32 %selected_value3) + %final_min = call i32 @llvm.smin.i32(i32 %min1, i32 %min2) + store i32 %final_min, ptr null, align 4 + ret i32 0 +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.mir b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.mir new file mode 100644 index 0000000..aeab8f6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.mir @@ -0,0 +1,57 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=register-coalescer -o - %s | FileCheck %s + +--- +name: pr71023 +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: pr71023 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x10, $v8, $v10 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF + ; CHECK-NEXT: undef [[PseudoVMV_V_I_M1_:%[0-9]+]].sub_vrm1_2:vrn8m1 = PseudoVMV_V_I_M1 undef [[PseudoVMV_V_I_M1_]].sub_vrm1_2, 0, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]].sub_vrm1_6:vrn8m1 = COPY undef [[PseudoVMV_V_I_M1_]].sub_vrm1_2 + ; CHECK-NEXT: BNE undef [[DEF]], $x0, %bb.3 + ; CHECK-NEXT: PseudoBR %bb.1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: BNE undef [[DEF]], $x0, %bb.3 + ; CHECK-NEXT: PseudoBR %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: successors: %bb.3(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3: + ; CHECK-NEXT: dead [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF + ; CHECK-NEXT: early-clobber [[PseudoVMV_V_I_M1_]].sub_vrm1_0:vrn8m1 = PseudoVRGATHER_VI_M1 undef [[PseudoVMV_V_I_M1_]].sub_vrm1_0, [[PseudoVMV_V_I_M1_]].sub_vrm1_2, 0, 0, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSSEG6E8_V_M1_MASK [[PseudoVMV_V_I_M1_]].sub_vrm1_0_sub_vrm1_1_sub_vrm1_2_sub_vrm1_3_sub_vrm1_4_sub_vrm1_5, undef [[DEF]], killed undef $v0, 0, 3 /* e8 */, implicit $vl, implicit $vtype :: (store unknown-size, align 1) + ; CHECK-NEXT: PseudoRET + bb.0: + successors: %bb.3(0x40000000), %bb.1(0x40000000) + liveins: $x10, $v8, $v10 + %0:gpr = IMPLICIT_DEF + %1:vrnov0 = PseudoVMV_V_I_M1 undef %1, 0, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + %2:vrnov0 = IMPLICIT_DEF + undef %3.sub_vrm1_0:vrn6m1nov0 = COPY undef %1 + %3.sub_vrm1_3:vrn6m1nov0 = COPY %2 + %3.sub_vrm1_4:vrn6m1nov0 = COPY undef %1 + BNE undef %0, $x0, %bb.3 + PseudoBR %bb.1 + bb.1: + successors: %bb.3(0x40000000), %bb.2(0x40000000) + BNE killed undef %0, $x0, %bb.3 + PseudoBR %bb.2 + bb.2: + successors: %bb.3(0x80000000) + bb.3: + %4:vr = IMPLICIT_DEF + early-clobber %4:vr = PseudoVRGATHER_VI_M1 undef %4, killed %1, 0, 0, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + undef %5.sub_vrm1_0:vrn6m1 = COPY killed %4 + %5.sub_vrm1_5:vrn6m1 = COPY killed %2 + PseudoVSSEG6E8_V_M1_MASK killed %5, undef %0, killed undef $v0, 0, 3 /* e8 */, implicit $vl, implicit $vtype :: (store unknown-size, align 1) + PseudoRET +... 
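Returning to the i64 min/max checks in min-max.ll above: RV32 has no single-instruction 64-bit compare, so both the XQCI and RV32I-SFB sequences split it into a signed compare of the high words and an unsigned compare of the low words, using the low-word result only when the high words are equal. A C sketch of that selection logic for smax, under the assumption that smax64 is an illustrative name rather than anything in the tests:

    #include <stdint.h>

    int64_t smax64(int64_t a, int64_t b) {
        uint32_t alo = (uint32_t)a,        blo = (uint32_t)b;
        int32_t  ahi = (int32_t)(a >> 32), bhi = (int32_t)(b >> 32);
        /* slt a5, a3, a1 -> signed high-word compare */
        int take_a = bhi < ahi;
        /* qc.mveq a5, a1, a3, a4 -> when the high words are equal,
           substitute the unsigned low-word compare (sltu a4, a2, a0). */
        if (ahi == bhi)
            take_a = blo < alo;
        return take_a ? a : b;
    }

smin, umax, and umin differ only in which compare direction (and signedness for the high word) feeds take_a; on RV64 none of this is needed and the output stays a single blt/bltu branch over a mv.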
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll new file mode 100644 index 0000000..d9a49a1 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll @@ -0,0 +1,18 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v -O0 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64 + +define void @matmul() { +; CHECK-RV64-LABEL: matmul: +; CHECK-RV64: # %bb.0: # %entry +; CHECK-RV64-NEXT: li a0, 0 +; CHECK-RV64-NEXT: vsetvli zero, a0, 512 +; CHECK-RV64-NEXT: sf.vsettm zero, a0 +; CHECK-RV64-NEXT: sf.vtzero.t mt0 +; CHECK-RV64-NEXT: ret +entry: + call void @llvm.riscv.sf.vtzero.t.i64(i64 0, i64 0, i64 0, i64 3, i64 1) + ret void +} + +; Function Attrs: nocallback nofree nosync nounwind willreturn +declare void @llvm.riscv.sf.vtzero.t.i64(i64 immarg, i64, i64, i64 immarg, i64 immarg) #0 diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive-xsfmm-vset-insert.mir b/llvm/test/CodeGen/RISCV/rvv/sifive-xsfmm-vset-insert.mir new file mode 100644 index 0000000..389283a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive-xsfmm-vset-insert.mir @@ -0,0 +1,523 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v \ +# RUN: -run-pass=phi-node-elimination,register-coalescer,riscv-insert-vsetvli | FileCheck %s + +--- | + define void @xsfmm_same_state(<vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 noundef %tm, i64 noundef %tn, i64 noundef %tk) { + entry: + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2) + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2) + ret void + } + + define void @xsfmm_different_state(<vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk) { + entry: + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2) + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 4) + ret void + } + + define void @xsfmm_different_state_bf(<vscale x 32 x half> %tile1, <vscale x 32 x bfloat> %tile2, i64 %tm, i64 %tn, i64 %tk) { + entry: + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile1, i64 %tm, i64 %tn, i64 %tk, i64 2) + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32bf16(i64 2, <vscale x 32 x bfloat> %tile2, <vscale x 32 x bfloat> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2) + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile1, i64 %tm, i64 %tn, i64 %tk, i64 2) + ret void + } + + define <vscale x 64 x i8> @interleave_rvv_and_xsfmm(<vscale x 64 x i8> %tile, i64 %vl, ptr %base) { + entry: + %0 = call <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.i64(i64 1, i64 %vl) + %1 = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %tile, <vscale x 64 x i8> %0, i64 %vl) + call void @llvm.riscv.sf.vste16.i64(i64 1, ptr %base, i64 %vl) + ret <vscale x 64 x i8> %1 + } + + define <vscale x 64 x i8> @interleave_rvv_and_xsfmm2(<vscale x 64 x i8> %tile, i64 %vl, ptr %base) { + entry: + %0 = call 
<vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %tile, <vscale x 64 x i8> %tile, i64 %vl) + %1 = call <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.i64(i64 1, i64 %vl) + %2 = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %tile, <vscale x 64 x i8> %0, i64 %vl) + call void @llvm.riscv.sf.vste16.i64(i64 1, ptr %base, i64 %vl) + ret <vscale x 64 x i8> %2 + } + + define void @consecutive_xsfmm(<vscale x 32 x half> %tile, i64 %tm, i64 %tn, i64 %tk, ptr %base) { + entry: + tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 0, <vscale x 32 x half> %tile, <vscale x 32 x half> %tile, i64 %tm, i64 %tn, i64 %tk, i64 2) + call void @llvm.riscv.sf.vste16.i64(i64 0, ptr %base, i64 %tn) + ret void + } + + define i64 @vsettnt_max(i64 %vl) { + entry: + %0 = call i64 @llvm.riscv.sf.vsettm.i64(i64 %vl, i64 1, i64 2) + %1 = call i64 @llvm.riscv.sf.vsettnt_max.i64(i64 1, i64 2) + ret i64 %0 + } + + define i64 @single_vsettm(i64 %vl) { + entry: + %0 = call i64 @llvm.riscv.sf.vsettm.i64(i64 %vl, i64 1, i64 2) + ret i64 %0 + } + + define i64 @single_vsettn(i64 %vl) { + entry: + %0 = call i64 @llvm.riscv.sf.vsettn.i64(i64 %vl, i64 1, i64 2) + ret i64 %0 + } + + define i64 @single_vsettk(i64 %vl) { + entry: + %0 = call i64 @llvm.riscv.sf.vsettk.i64(i64 %vl, i64 1, i64 2) + ret i64 %0 + } + + define void @sf_vtzero(i64 %tm, i64 %tn) { + entry: + call void @llvm.riscv.sf.vtzero.i64(i64 1, i64 %tm, i64 %tn, i64 3, i64 4) + ret void + } + + declare void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64, <vscale x 32 x half>, <vscale x 32 x half>, i64, i64, i64, i64) + declare void @llvm.riscv.sf.mm.f.f.i64.nxv32bf16(i64, <vscale x 32 x bfloat>, <vscale x 32 x bfloat>, i64, i64, i64, i64) + declare <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.i64(i64, i64) + declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i64) + declare void @llvm.riscv.sf.vste16.i64(i64, ptr, i64) + declare i64 @llvm.riscv.sf.vsettnt_max.i64(i64, i64) + declare i64 @llvm.riscv.sf.vsettm.i64(i64, i64, i64) + declare i64 @llvm.riscv.sf.vsettn.i64(i64, i64, i64) + declare i64 @llvm.riscv.sf.vsettk.i64(i64, i64, i64) + declare void @llvm.riscv.sf.vtzero.i64(i64, i64, i64, i64, i64) +... 
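+# The cases below pin down where the riscv-insert-vsetvli pass places the
+# XSfmm state pseudos (PseudoSF_VSETTNT / PseudoSF_VSETTM / PseudoSF_VSETTK)
+# around tile ops, and when an already-established state can be reused.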
+---
+name: xsfmm_same_state
+alignment: 4
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vrm8 }
+  - { id: 1, class: vrm8 }
+  - { id: 2, class: gprnox0 }
+  - { id: 3, class: gprnox0 }
+  - { id: 4, class: gprnox0 }
+liveins:
+  - { reg: '$v8m8', virtual-reg: '%0' }
+  - { reg: '$v16m8', virtual-reg: '%1' }
+  - { reg: '$x10', virtual-reg: '%2' }
+  - { reg: '$x11', virtual-reg: '%3' }
+  - { reg: '$x12', virtual-reg: '%4' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8, $x10, $x11, $x12
+    ; CHECK-LABEL: name: xsfmm_same_state
+    ; CHECK: liveins: $v8m8, $v16m8, $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x10
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoRET
+    %4:gprnox0 = COPY $x12
+    %3:gprnox0 = COPY $x11
+    %2:gprnox0 = COPY $x10
+    %1:vrm8 = COPY $v16m8
+    %0:vrm8 = COPY $v8m8
+    PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+    PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+    PseudoRET
+...
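+# Same tn/sew/widen state for both ops above: the CHECK lines expect no
+# second PseudoSF_VSETTNT, only refreshed tm/tk settings.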
+---
+name: xsfmm_different_state
+alignment: 4
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vrm8 }
+  - { id: 1, class: vrm8 }
+  - { id: 2, class: gprnox0 }
+  - { id: 3, class: gprnox0 }
+  - { id: 4, class: gprnox0 }
+liveins:
+  - { reg: '$v8m8', virtual-reg: '%0' }
+  - { reg: '$v16m8', virtual-reg: '%1' }
+  - { reg: '$x10', virtual-reg: '%2' }
+  - { reg: '$x11', virtual-reg: '%3' }
+  - { reg: '$x12', virtual-reg: '%4' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8, $x10, $x11, $x12
+    ; CHECK-LABEL: name: xsfmm_different_state
+    ; CHECK: liveins: $v8m8, $v16m8, $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x10
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1544 /* e16, w4 */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 3, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 3, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 4, implicit $frm, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoRET
+    %4:gprnox0 = COPY $x12
+    %3:gprnox0 = COPY $x11
+    %2:gprnox0 = COPY $x10
+    %1:vrm8 = COPY $v16m8
+    %0:vrm8 = COPY $v8m8
+    PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+    PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 4, implicit $frm
+    PseudoRET
+...
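+# Widening 2 -> 4 between the two ops above changes the state, so a second
+# PseudoSF_VSETTNT with a different vtype immediate (1544 vs. 1032) is
+# expected before the second PseudoSF_MM_F_F.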
+---
+name: xsfmm_different_state_bf
+alignment: 4
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vrm8 }
+  - { id: 1, class: vrm8 }
+  - { id: 2, class: gprnox0 }
+  - { id: 3, class: gprnox0 }
+  - { id: 4, class: gprnox0 }
+liveins:
+  - { reg: '$v8m8', virtual-reg: '%0' }
+  - { reg: '$v16m8', virtual-reg: '%1' }
+  - { reg: '$x10', virtual-reg: '%2' }
+  - { reg: '$x11', virtual-reg: '%3' }
+  - { reg: '$x12', virtual-reg: '%4' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0.entry:
+    liveins: $v8m8, $v16m8, $x10, $x11, $x12
+    ; CHECK-LABEL: name: xsfmm_different_state_bf
+    ; CHECK: liveins: $v8m8, $v16m8, $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x10
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrm8 = COPY $v16m8
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8 = COPY $v8m8
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY4]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1288 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: PseudoSF_MM_F_F_ALT $t2, [[COPY3]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+    ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY4]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoRET
+    %4:gprnox0 = COPY $x12
+    %3:gprnox0 = COPY $x11
+    %2:gprnox0 = COPY $x10
+    %1:vrm8 = COPY $v16m8
+    %0:vrm8 = COPY $v8m8
+    PseudoSF_MM_F_F $t2, %0:vrm8, %0:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+    PseudoSF_MM_F_F_ALT $t2, %1:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+    PseudoSF_MM_F_F $t2, %0:vrm8, %0:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+    PseudoRET
+...
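+# The bf16 op in the middle uses a different vtype immediate (1288 vs. 1032,
+# presumably the altfmt encoding), so every switch between the f16 and bf16
+# forms re-emits PseudoSF_VSETTNT.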
+--- +name: interleave_rvv_and_xsfmm +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: vrm8 } + - { id: 1, class: gprnox0 } + - { id: 2, class: gpr } + - { id: 3, class: gpr } + - { id: 4, class: vrm8 } + - { id: 5, class: vrm8 } +liveins: + - { reg: '$v8m8', virtual-reg: '%0' } + - { reg: '$x10', virtual-reg: '%1' } + - { reg: '$x11', virtual-reg: '%2' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $v8m8, $x10, $x11 + ; CHECK-LABEL: name: interleave_rvv_and_xsfmm + ; CHECK: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1 + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 512 /* e8, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoSF_VTMV_V_T:%[0-9]+]]:vrm8 = PseudoSF_VTMV_V_T [[ADDI]], $noreg, 3, 1, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY1]], 195 /* e8, m8, ta, ma */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 $noreg, [[COPY2]], [[PseudoSF_VTMV_V_T]], $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: PseudoSF_VSTE16 [[ADDI]], [[COPY]], $noreg, 4, 1, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]], implicit $vtype + ; CHECK-NEXT: PseudoRET implicit $v8m8 + %2:gpr = COPY $x11 + %1:gprnox0 = COPY $x10 + %0:vrm8 = COPY $v8m8 + %3:gpr = ADDI $x0, 1 + %4:vrm8 = PseudoSF_VTMV_V_T %3:gpr, %1:gprnox0, 3, 1 + %5:vrm8 = PseudoVADD_VV_M8 $noreg, %0:vrm8, killed %4:vrm8, %1:gprnox0, 3, 0 + PseudoSF_VSTE16 %3:gpr, %2:gpr, %1:gprnox0, 4, 1 + $v8m8 = COPY %5:vrm8 + PseudoRET implicit $v8m8 +... 
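+# Interleaving plain RVV ops with XSfmm ops above makes the pass toggle
+# between PseudoVSETVLI and PseudoSF_VSETTNT around each group.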
+--- +name: interleave_rvv_and_xsfmm2 +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: vrm8 } + - { id: 1, class: gprnox0 } + - { id: 2, class: gpr } + - { id: 3, class: gpr } + - { id: 4, class: vrm8 } + - { id: 5, class: vrm8 } +liveins: + - { reg: '$v8m8', virtual-reg: '%0' } + - { reg: '$x10', virtual-reg: '%1' } + - { reg: '$x11', virtual-reg: '%2' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $v8m8, $x10, $x11 + ; CHECK-LABEL: name: interleave_rvv_and_xsfmm2 + ; CHECK: liveins: $v8m8, $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1 + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY1]], 195 /* e8, m8, ta, ma */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 $noreg, [[COPY2]], [[COPY2]], $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 512 /* e8, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead [[PseudoSF_VTMV_V_T:%[0-9]+]]:vrm8 = PseudoSF_VTMV_V_T [[ADDI]], $noreg, 3, 1, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY1]], 195 /* e8, m8, ta, ma */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M8_1:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 $noreg, [[PseudoVADD_VV_M8_]], [[PseudoVADD_VV_M8_]], $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: PseudoSF_VSTE16 [[ADDI]], [[COPY]], $noreg, 4, 1, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_1]], implicit $vtype + ; CHECK-NEXT: PseudoRET implicit $v8m8 + %2:gpr = COPY $x11 + %1:gprnox0 = COPY $x10 + %0:vrm8 = COPY $v8m8 + %3:gpr = ADDI $x0, 1 + %4:vrm8 = PseudoVADD_VV_M8 $noreg, %0:vrm8, killed %0:vrm8, %1:gprnox0, 3, 0 + %5:vrm8 = PseudoSF_VTMV_V_T %3:gpr, %1:gprnox0, 3, 1 + %6:vrm8 = PseudoVADD_VV_M8 $noreg, %4:vrm8, killed %4:vrm8, %1:gprnox0, 3, 0 + PseudoSF_VSTE16 %3:gpr, %2:gpr, %1:gprnox0, 4, 1 + $v8m8 = COPY %6:vrm8 + PseudoRET implicit $v8m8 +... 
+--- +name: consecutive_xsfmm +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: vrm8 } + - { id: 1, class: gprnox0 } + - { id: 2, class: gprnox0 } + - { id: 3, class: gprnox0 } + - { id: 4, class: gprnox0 } +liveins: + - { reg: '$v8m8', virtual-reg: '%0' } + - { reg: '$x10', virtual-reg: '%1' } + - { reg: '$x11', virtual-reg: '%2' } + - { reg: '$x12', virtual-reg: '%3' } + - { reg: '$x13', virtual-reg: '%4' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $v8m8, $x10, $x11, $x12, $x13 + ; CHECK-LABEL: name: consecutive_xsfmm + ; CHECK: liveins: $v8m8, $x10, $x11, $x12, $x13 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gprnox0 = COPY $x12 + ; CHECK-NEXT: dead [[COPY4:%[0-9]+]]:gprnox0 = COPY $x13 + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY2]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY1]], 4, 2, implicit-def $vtype, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY3]], 4, 2, implicit-def $vtype, implicit $vtype + ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY]], [[COPY]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY3]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: PseudoSF_VSTE16 [[COPY1]], [[COPY2]], $noreg, 4, 1, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoRET + %0:vrm8 = COPY $v8m8 + %1:gprnox0 = COPY $x10 + %2:gprnox0 = COPY $x11 + %3:gprnox0 = COPY $x12 + %4:gprnox0 = COPY $x13 + PseudoSF_MM_F_F $t2, %0:vrm8, %0:vrm8, 7, %1:gprnox0, %2:gprnox0, %3:gprnox0, 4, 2, implicit $frm + PseudoSF_VSTE16 %1:gprnox0, %2:gprnox0, %3:gprnox0, 4, 1 + PseudoRET +... +--- +name: vsettnt_max +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: gprnox0 } +liveins: + - { reg: '$x10', virtual-reg: '%0' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $x10 + ; CHECK-LABEL: name: vsettnt_max + ; CHECK: liveins: $x10 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_:%[0-9]+]]:gprnox0 = PseudoSF_VSETTNTX0 killed $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead [[PseudoSF_VSETTK:%[0-9]+]]:gprnox0 = PseudoSF_VSETTK [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype + ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_1:%[0-9]+]]:gprnox0 = PseudoSF_VSETTNTX0 $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype, implicit $vtype + ; CHECK-NEXT: [[PseudoSF_VSETTM:%[0-9]+]]:gprnox0 = PseudoSF_VSETTM [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype + ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTM]] + ; CHECK-NEXT: PseudoRET implicit $x10 + %0:gprnox0 = COPY $x10 + %1:gprnox0 = PseudoSF_VSETTK %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype + %2:gprnox0 = PseudoSF_VSETTNTX0 $x0, 520, implicit-def $vl, implicit-def $vtype, implicit $vtype + %3:gprnox0 = PseudoSF_VSETTM %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype + $x10 = COPY %3:gprnox0 + PseudoRET implicit $x10 +... 
+--- +name: single_vsettm +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: gprnox0 } +liveins: + - { reg: '$x10', virtual-reg: '%0' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $x10 + ; CHECK-LABEL: name: single_vsettm + ; CHECK: liveins: $x10 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_:%[0-9]+]]:gprnox0 = PseudoSF_VSETTNTX0 killed $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoSF_VSETTM:%[0-9]+]]:gprnox0 = PseudoSF_VSETTM [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype + ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTM]] + ; CHECK-NEXT: PseudoRET implicit $x10 + %0:gprnox0 = COPY $x10 + %1:gprnox0 = PseudoSF_VSETTM %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype + $x10 = COPY %1:gprnox0 + PseudoRET implicit $x10 +... +--- +name: single_vsettn +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: gprnox0 } +liveins: + - { reg: '$x10', virtual-reg: '%0' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $x10 + ; CHECK-LABEL: name: single_vsettn + ; CHECK: liveins: $x10 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: [[PseudoSF_VSETTNT:%[0-9]+]]:gprnox0 = PseudoSF_VSETTNT [[COPY]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype, implicit $vtype + ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTNT]] + ; CHECK-NEXT: PseudoRET implicit $x10 + %0:gprnox0 = COPY $x10 + %1:gprnox0 = PseudoSF_VSETTNT %0:gprnox0, 520, implicit-def $vl, implicit-def $vtype, implicit $vtype + $x10 = COPY %1:gprnox0 + PseudoRET implicit $x10 +... +--- +name: single_vsettk +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: gprnox0 } +liveins: + - { reg: '$x10', virtual-reg: '%0' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $x10 + ; CHECK-LABEL: name: single_vsettk + ; CHECK: liveins: $x10 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_:%[0-9]+]]:gprnox0 = PseudoSF_VSETTNTX0 killed $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoSF_VSETTK:%[0-9]+]]:gprnox0 = PseudoSF_VSETTK [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype + ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTK]] + ; CHECK-NEXT: PseudoRET implicit $x10 + %0:gprnox0 = COPY $x10 + %1:gprnox0 = PseudoSF_VSETTK %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype + $x10 = COPY %1:gprnox0 + PseudoRET implicit $x10 +... 
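+# For the standalone vsettm and vsettk cases above, the pass first
+# materializes a PseudoSF_VSETTNTX0, since sf.vsettm/sf.vsettk appear to
+# require an established tnt state; a lone sf.vsettnt needs no such helper.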
+--- +name: sf_vtzero +alignment: 4 +tracksRegLiveness: true +registers: + - { id: 0, class: gprnox0 } + - { id: 1, class: gprnox0 } +liveins: + - { reg: '$x10', virtual-reg: '%0' } + - { reg: '$x11', virtual-reg: '%1' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $x10, $x11 + ; CHECK-LABEL: name: sf_vtzero + ; CHECK: liveins: $x10, $x11 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11 + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1536 /* e8, w4 */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY]], 3, 3, implicit-def $vtype, implicit $vtype + ; CHECK-NEXT: PseudoSF_VTZERO_T $t1, $noreg, $noreg, 3, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoRET + %0:gprnox0 = COPY $x10 + %1:gprnox0 = COPY $x11 + PseudoSF_VTZERO_T $t1, %0:gprnox0, %1:gprnox0, 3, 4 + PseudoRET +... diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll new file mode 100644 index 0000000..9b9a849 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll @@ -0,0 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.mm.e4m3.e4m3.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.e4m3.e4m3 mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.e4m3.e4m3.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll new file mode 100644 index 0000000..b63974f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll @@ -0,0 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.mm.e4m3.e5m2.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.e4m3.e5m2 mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.e4m3.e5m2.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 
x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll
new file mode 100644
index 0000000..62d629b1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.e5m2.e4m3.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_e5m2_e4m3_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_e5m2_e4m3_w4_u8m8_u8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT:    sf.vsettm zero, a1
+; CHECK-NEXT:    sf.vsettk zero, a3
+; CHECK-NEXT:    sf.mm.e5m2.e4m3 mt0, v8, v16
+; CHECK-NEXT:    ret
+ entry:
+  call void @llvm.riscv.sf.mm.e5m2.e4m3.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll
new file mode 100644
index 0000000..7a90c97
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.e5m2.e5m2.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT:    sf.vsettm zero, a1
+; CHECK-NEXT:    sf.vsettk zero, a3
+; CHECK-NEXT:    sf.mm.e5m2.e5m2 mt0, v8, v16
+; CHECK-NEXT:    ret
+ entry:
+  call void @llvm.riscv.sf.mm.e5m2.e5m2.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll
new file mode 100644
index 0000000..29451c6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -mattr=+zvfh -mattr=+xsfmm32a32f -mattr=+xsfmm64a64f \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -mattr=+zvfh -mattr=+xsfmm32a32f -mattr=+xsfmm64a64f \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv32f16(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen,
iXLen, iXLen, iXLen) + +define void @test_sf_mm_f_f_w2_f16m8(iXLen %mtd, <vscale x 32 x half> %v1, <vscale x 32 x half> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_f_f_w2_f16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e16, w2 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.f.f mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.f.f.iXLen.nxv32f16(iXLen 0, <vscale x 32 x half> %v1, <vscale x 32 x half> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 2) + ret void +} + +declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv16f32(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_f_f_w1_f32m8(iXLen %mtd, <vscale x 16 x float> %v1, <vscale x 16 x float> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_f_f_w1_f32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e32, w1 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.f.f mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.f.f.iXLen.nxv16f32(iXLen 0, <vscale x 16 x float> %v1, <vscale x 16 x float> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 1) + ret void +} + +declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv8f64(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_f_f_w1_f64m8(iXLen %mtd, <vscale x 8 x double> %v1, <vscale x 8 x double> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_f_f_w1_f64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e64, w1 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.f.f mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.f.f.iXLen.nxv8f64(iXLen 0, <vscale x 8 x double> %v1, <vscale x 8 x double> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 1) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll new file mode 100644 index 0000000..6a4b29f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll @@ -0,0 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.mm.s.s.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_s_s_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_s_s_w4_i8m8_i8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.s.s mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.s.s.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll new file mode 100644 index 0000000..79239b0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll @@ -0,0 +1,20 @@ +; NOTE: Assertions have been autogenerated 
by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.mm.s.u.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_s_u_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_s_u_w4_i8m8_i8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.s.u mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.s.u.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll new file mode 100644 index 0000000..b0d039b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll @@ -0,0 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.mm.u.s.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_u_s_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_u_s_w4_i8m8_i8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.u.s mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.mm.u.s.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll new file mode 100644 index 0000000..913c277 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll @@ -0,0 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.mm.u.u.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_mm_u_u_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) { +; CHECK-LABEL: test_sf_mm_u_u_w4_i8m8_i8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4 +; CHECK-NEXT: sf.vsettm zero, a1 +; CHECK-NEXT: sf.vsettk zero, a3 +; CHECK-NEXT: sf.mm.u.u mt0, v8, v16 +; CHECK-NEXT: ret + entry: + call void 
@llvm.riscv.sf.mm.u.u.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll new file mode 100644 index 0000000..8048dec --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vlte16.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vlte16(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vlte16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e16, w1 +; CHECK-NEXT: sf.vlte16 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vlte16.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll new file mode 100644 index 0000000..a526dc8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vlte32.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vlte32(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vlte32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e32, w1 +; CHECK-NEXT: sf.vlte32 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vlte32.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll new file mode 100644 index 0000000..ed0c48a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vlte64.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vlte64(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vlte64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e64, w1 +; CHECK-NEXT: sf.vlte64 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vlte64.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll new file mode 100644 index 0000000..67b3ed2 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vlte8.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vlte8(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vlte8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w1 +; CHECK-NEXT: sf.vlte8 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vlte8.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll new file mode 100644 index 0000000..4da37fa --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare iXLen @llvm.riscv.sf.vsettk.iXLen(iXLen, iXLen, iXLen) + +define iXLen @test_sf_vsettk(iXLen %tk) { +; CHECK-LABEL: test_sf_vsettk: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt a1, zero, e16, w2 +; CHECK-NEXT: sf.vsettk a0, a0 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettk.iXLen(iXLen %tk, iXLen 1, iXLen 2) + ret iXLen %0 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll 
new file mode 100644 index 0000000..143c26c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare iXLen @llvm.riscv.sf.vsettm.iXLen(iXLen, iXLen, iXLen) + +define iXLen @test_sf_vsettm(iXLen %tm) { +; CHECK-LABEL: test_sf_vsettm: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt a1, zero, e8, w4 +; CHECK-NEXT: sf.vsettm a0, a0 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettm.iXLen(iXLen %tm, iXLen 0, iXLen 3) + ret iXLen %0 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll new file mode 100644 index 0000000..48fa1bc8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll @@ -0,0 +1,72 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen, iXLen, iXLen) + +define iXLen @test_sf_vsettnt_e8w1(iXLen %tn) { +; CHECK-LABEL: test_sf_vsettnt_e8w1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt a0, a0, e8, w1 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 0, iXLen 1) + ret iXLen %0 +} + +define iXLen @test_sf_vsettnt_e8w2(iXLen %tn) { +; CHECK-LABEL: test_sf_vsettnt_e8w2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt a0, a0, e8, w2 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 0, iXLen 2) + ret iXLen %0 +} + +define iXLen @test_sf_vsettnt_e8w4(iXLen %tn) { +; CHECK-LABEL: test_sf_vsettnt_e8w4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt a0, a0, e8, w4 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 0, iXLen 3) + ret iXLen %0 +} + +define iXLen @test_sf_vsettnt_e16w1(iXLen %tn) { +; CHECK-LABEL: test_sf_vsettnt_e16w1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt a0, a0, e16, w1 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 1, iXLen 1) + ret iXLen %0 +} + +define iXLen @test_sf_vsettnt_e16w2(iXLen %tn) { +; CHECK-LABEL: test_sf_vsettnt_e16w2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt 
a0, a0, e16, w2 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 1, iXLen 2) + ret iXLen %0 +} + +define iXLen @test_sf_vsettnt_e16w4(iXLen %tn) { +; CHECK-LABEL: test_sf_vsettnt_e16w4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt a0, a0, e16, w4 +; CHECK-NEXT: ret + entry: + %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 1, iXLen 3) + ret iXLen %0 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll new file mode 100644 index 0000000..7a76151 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vste16.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vste16(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vste16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e16, w1 +; CHECK-NEXT: sf.vste16 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vste16.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll new file mode 100644 index 0000000..8ff6e6a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vste32.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vste32(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vste32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e32, w1 +; CHECK-NEXT: sf.vste32 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vste32.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll new file mode 100644 index 0000000..53990e4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc 
-mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vste64.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vste64(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vste64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e64, w1 +; CHECK-NEXT: sf.vste64 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vste64.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll new file mode 100644 index 0000000..09b7259 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vste8.iXLen(iXLen, ptr, iXLen) + +define dso_local void @test_sf_vste8(iXLen %tss, ptr %base, iXLen %vl) { +; CHECK-LABEL: test_sf_vste8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a2, e8, w1 +; CHECK-NEXT: sf.vste8 a0, (a1) +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vste8.iXLen(iXLen %tss, ptr %base, iXLen %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll new file mode 100644 index 0000000..394eb60 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll @@ -0,0 +1,22 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vtdiscard() + +define dso_local void @test_sf_vtdiscard() { +; CHECK-LABEL: test_sf_vtdiscard: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vtdiscard +; CHECK-NEXT: ret + 
entry: + call void @llvm.riscv.sf.vtdiscard() + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll new file mode 100644 index 0000000..66c9d26 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll @@ -0,0 +1,114 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+zvfbfmin -mattr=+xsfmmbase \ +; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare void @llvm.riscv.sf.vtmv.t.v.nxv32bf16.iXLen(iXLen, <vscale x 32 x bfloat>, iXLen) + +define void @test_sf_vtmv_t_v_bf16m8(iXLen %tss, <vscale x 32 x bfloat> %src, iXLen %vl) { +; CHECK-LABEL: test_sf_vtmv_t_v_bf16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1 +; CHECK-NEXT: sf.vtmv.t.v a0, v8 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vtmv.t.v.nxv32bf16.iXLen(iXLen %tss, <vscale x 32 x bfloat> %src, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vtmv.t.v.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, iXLen) + +define void @test_sf_vtmv_t_v_f16(iXLen %tss, <vscale x 32 x half> %src, iXLen %vl) { +; CHECK-LABEL: test_sf_vtmv_t_v_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1 +; CHECK-NEXT: sf.vtmv.t.v a0, v8 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vtmv.t.v.nxv32f16.iXLen(iXLen %tss, <vscale x 32 x half> %src, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vtmv.t.v.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, iXLen) + +define void @test_sf_vtmv_t_v_f32(iXLen %tss, <vscale x 16 x float> %src, iXLen %vl) { +; CHECK-LABEL: test_sf_vtmv_t_v_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1 +; CHECK-NEXT: sf.vtmv.t.v a0, v8 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vtmv.t.v.nxv16f32.iXLen(iXLen %tss, <vscale x 16 x float> %src, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vtmv.t.v.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, iXLen) + +define void @test_sf_vtmv_t_v_f64(iXLen %tss, <vscale x 8 x double> %src, iXLen %vl) { +; CHECK-LABEL: test_sf_vtmv_t_v_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1 +; CHECK-NEXT: sf.vtmv.t.v a0, v8 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vtmv.t.v.nxv8f64.iXLen(iXLen %tss, <vscale x 8 x double> %src, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vtmv.t.v.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, iXLen) + +define void @test_sf_vtmv_t_v_i8(iXLen %tss, <vscale x 64 x i8> %src, iXLen %vl) { +; CHECK-LABEL: test_sf_vtmv_t_v_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sf.vsettnt zero, a1, e8, w1 +; CHECK-NEXT: sf.vtmv.t.v a0, v8 +; CHECK-NEXT: ret + entry: + call void @llvm.riscv.sf.vtmv.t.v.nxv64i8.iXLen(iXLen %tss, <vscale x 64 x i8> %src, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vtmv.t.v.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, iXLen) + +define void @test_sf_vtmv_t_v_i16(iXLen %tss, <vscale x 32 x i16> %src, iXLen %vl) { +; CHECK-LABEL: 
+; CHECK-LABEL: test_sf_vtmv_t_v_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv32i16.iXLen(iXLen %tss, <vscale x 32 x i16> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, iXLen)
+
+define void @test_sf_vtmv_t_v_i32(iXLen %tss, <vscale x 16 x i32> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv16i32.iXLen(iXLen %tss, <vscale x 16 x i32> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, iXLen)
+
+define void @test_sf_vtmv_t_v_i64(iXLen %tss, <vscale x 8 x i64> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv8i64.iXLen(iXLen %tss, <vscale x 8 x i64> %src, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll
new file mode 100644
index 0000000..0dcc2ab
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 32 x bfloat> @llvm.riscv.sf.vtmv.v.t.nxv32bf16.iXLen(iXLen, iXLen)
+
+define <vscale x 32 x bfloat> @test_sf_vtmv_v_t_bf16m8(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_bf16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 32 x bfloat> @llvm.riscv.sf.vtmv.v.t.nxv32bf16.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 32 x bfloat> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vtmv.v.t.nxv32f16.iXLen(iXLen, iXLen)
+
+define <vscale x 32 x half> @test_sf_vtmv_v_t_f16(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 32 x half> @llvm.riscv.sf.vtmv.v.t.nxv32f16.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vtmv.v.t.nxv16f32.iXLen(iXLen, iXLen)
+
+define <vscale x 16 x float> @test_sf_vtmv_v_t_f32(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 16 x float> @llvm.riscv.sf.vtmv.v.t.nxv16f32.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vtmv.v.t.nxv8f64.iXLen(iXLen, iXLen)
+
+define <vscale x 8 x double> @test_sf_vtmv_v_t_f64(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 8 x double> @llvm.riscv.sf.vtmv.v.t.nxv8f64.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.iXLen(iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vtmv_v_t_i8(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e8, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vtmv.v.t.nxv32i16.iXLen(iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vtmv_v_t_i16(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 32 x i16> @llvm.riscv.sf.vtmv.v.t.nxv32i16.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vtmv.v.t.nxv16i32.iXLen(iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vtmv_v_t_i32(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 16 x i32> @llvm.riscv.sf.vtmv.v.t.nxv16i32.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vtmv.v.t.nxv8i64.iXLen(iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vtmv_v_t_i64(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 8 x i64> @llvm.riscv.sf.vtmv.v.t.nxv8i64.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll
new file mode 100644
index 0000000..bbccb02
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vtzero.t.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+define void @test_sf_vtzero_t(iXLen %tm, iXLen %tn) {
+; CHECK-LABEL: test_sf_vtzero_t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a0
+; CHECK-NEXT: sf.vtzero.t mt0
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtzero.t.iXLen(iXLen 0, iXLen %tm, iXLen %tn, iXLen 3, iXLen 4)
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/RISCV/select-to-and-zext.ll b/llvm/test/CodeGen/RISCV/select-to-and-zext.ll
index 2f03ff9..318268a 100644
--- a/llvm/test/CodeGen/RISCV/select-to-and-zext.ll
+++ b/llvm/test/CodeGen/RISCV/select-to-and-zext.ll
@@ -15,8 +15,7 @@ define i32 @from_cmpeq(i32 %xx, i32 %y) {
 ;
 ; RV64I-LABEL: from_cmpeq:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: addi a0, a0, -9
+; RV64I-NEXT: addiw a0, a0, -9
 ; RV64I-NEXT: seqz a0, a0
 ; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ret
@@ -39,8 +38,7 @@ define i32 @from_cmpeq_fail_bad_andmask(i32 %xx, i32 %y) {
 ;
 ; RV64I-LABEL: from_cmpeq_fail_bad_andmask:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: addi a0, a0, -9
+; RV64I-NEXT: addiw a0, a0, -9
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index fabb573..4e14893 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -104,9 +104,8 @@ define i1 @and_icmps_const_not1bit_diff(i32 %x) nounwind {
 ;
 ; RV64I-LABEL: and_icmps_const_not1bit_diff:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: addi a1, a0, -44
-; RV64I-NEXT: addi a0, a0, -92
+; RV64I-NEXT: addiw a1, a0, -44
+; RV64I-NEXT: addiw a0, a0, -92
 ; RV64I-NEXT: snez a1, a1
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: and a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
index bdbe4ed..07bfbe6 100644
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -674,8 +674,7 @@ define i32 @sext_of_not_cmp_i32(i32 %x) {
 ;
 ; RV64-LABEL: sext_of_not_cmp_i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: sext.w a0, a0
-; RV64-NEXT: addi a0, a0, -7
+; RV64-NEXT: addiw a0, a0, -7
 ; RV64-NEXT: seqz a0, a0
 ; RV64-NEXT: addi a0, a0, -1
 ; RV64-NEXT: ret
@@ -718,8 +717,7 @@ define i32 @dec_of_zexted_cmp_i32(i32 %x) {
 ;
 ; RV64-LABEL: dec_of_zexted_cmp_i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: sext.w a0, a0
-; RV64-NEXT: addi a0, a0, -7
+; RV64-NEXT: addiw a0, a0, -7
 ; RV64-NEXT: seqz a0, a0
 ; RV64-NEXT: addi a0, a0, -1
 ; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index 2751332c..bf6802d 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1047,8 +1047,8 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 signext %v1, ptr %res) {
 ; RV64-LABEL: usubo.i32.constant.lhs:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: li a2, -2
-; RV64-NEXT: subw a2, a2, a0
-; RV64-NEXT: addi a0, a2, 1
+; RV64-NEXT: sub a2, a2, a0
+; RV64-NEXT: addiw a0, a2, 1
 ; RV64-NEXT: seqz a0, a0
 ; RV64-NEXT: sw a2, 0(a1)
 ; RV64-NEXT: ret
@@ -1065,8 +1065,8 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 signext %v1, ptr %res) {
 ; RV64ZBA-LABEL: usubo.i32.constant.lhs:
 ; RV64ZBA: # %bb.0: # %entry
 ; RV64ZBA-NEXT: li a2, -2
-; RV64ZBA-NEXT: subw a2, a2, a0
-; RV64ZBA-NEXT: addi a0, a2, 1
+; RV64ZBA-NEXT: sub a2, a2, a0
+; RV64ZBA-NEXT: addiw a0, a2, 1
 ; RV64ZBA-NEXT: seqz a0, a0
 ; RV64ZBA-NEXT: sw a2, 0(a1)
 ; RV64ZBA-NEXT: ret
@@ -1083,8 +1083,8 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 signext %v1, ptr %res) {
 ; RV64ZICOND-LABEL: usubo.i32.constant.lhs:
 ; RV64ZICOND: # %bb.0: # %entry
 ; RV64ZICOND-NEXT: li a2, -2
-; RV64ZICOND-NEXT: subw a2, a2, a0
-; RV64ZICOND-NEXT: addi a0, a2, 1
+; RV64ZICOND-NEXT: sub a2, a2, a0
+; RV64ZICOND-NEXT: addiw a0, a2, 1
 ; RV64ZICOND-NEXT: seqz a0, a0
 ; RV64ZICOND-NEXT: sw a2, 0(a1)
 ; RV64ZICOND-NEXT: ret
diff --git a/llvm/test/CodeGen/SPIRV/FCmpFalse.ll b/llvm/test/CodeGen/SPIRV/FCmpFalse.ll
new file mode 100644
index 0000000..55d64196
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/FCmpFalse.ll
@@ -0,0 +1,10 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: %[[#FalseVal:]] = OpConstantFalse %[[#]]
+; CHECK: OpReturnValue %[[#FalseVal]]
+
+define spir_func i1 @f(float %0) {
+ %2 = fcmp false float %0, %0
+ ret i1 %2
+}
diff --git a/llvm/test/CodeGen/SPIRV/FCmpFalse_Vec.ll b/llvm/test/CodeGen/SPIRV/FCmpFalse_Vec.ll
new file mode 100644
index 0000000..c410b64
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/FCmpFalse_Vec.ll
@@ -0,0 +1,13 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: %[[#BoolTy:]] = OpTypeBool
+; CHECK: %[[#VecTy:]] = OpTypeVector %[[#BoolTy]] 4
+; CHECK: %[[#False:]] = OpConstantFalse %[[#BoolTy]]
+; CHECK: %[[#Composite:]] = OpConstantComposite %[[#VecTy]] %[[#False]] %[[#False]] %[[#False]] %[[#False]]
+; CHECK: OpReturnValue %[[#Composite]]
+
+define spir_func <4 x i1> @test(<4 x float> %a) {
+ %compare = fcmp false <4 x float> %a, %a
+ ret <4 x i1> %compare
+}
diff --git a/llvm/test/CodeGen/SPIRV/builtin_duplicate.ll b/llvm/test/CodeGen/SPIRV/builtin_duplicate.ll
new file mode 100644
index 0000000..8786554
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/builtin_duplicate.ll
@@ -0,0 +1,20 @@
+;; This test checks if we generate a single builtin variable for the following
+;; LLVM IR.
+;; @__spirv_BuiltInLocalInvocationId - A global variable
+;; %3 = tail call i64 @_Z12get_local_idj(i32 0) - A function call
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpName %[[#]] "__spirv_BuiltInLocalInvocationId"
+; CHECK-NOT: OpName %[[#]] "__spirv_BuiltInLocalInvocationId.1"
+
+@__spirv_BuiltInLocalInvocationId = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32
+
+declare spir_func i64 @_Z12get_local_idj(i32) local_unnamed_addr
+
+define spir_kernel void @test(i32 %a) {
+entry:
+ %builtin_call = tail call i64 @_Z12get_local_idj(i32 0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/complex-constexpr.ll b/llvm/test/CodeGen/SPIRV/complex-constexpr.ll
new file mode 100644
index 0000000..e2c1d00
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/complex-constexpr.ll
@@ -0,0 +1,21 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+@.str.1 = private unnamed_addr addrspace(1) constant [1 x i8] zeroinitializer, align 1
+
+define linkonce_odr hidden spir_func void @test() {
+entry:
+; CHECK: %[[#MinusOne:]] = OpConstant %[[#]] 18446744073709551615
+; CHECK: %[[#Ptr:]] = OpConvertUToPtr %[[#]] %[[#MinusOne]]
+; CHECK: %[[#PtrCast:]] = OpPtrCastToGeneric %[[#]] %[[#]]
+; CHECK: %[[#]] = OpFunctionCall %[[#]] %[[#]] %[[#PtrCast]] %[[#Ptr]]
+
+ %cast = bitcast ptr addrspace(4) inttoptr (i64 -1 to ptr addrspace(4)) to ptr addrspace(4)
+ call spir_func void @bar(ptr addrspace(4) addrspacecast (ptr addrspace(1) @.str.1 to ptr addrspace(4)), ptr addrspace(4) %cast)
+ ret void
+}
+
+define linkonce_odr hidden spir_func void @bar(ptr addrspace(4) %begin, ptr addrspace(4) %end) {
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/dominator-order.ll b/llvm/test/CodeGen/SPIRV/dominator-order.ll
new file mode 100644
index 0000000..2ecdddc
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/dominator-order.ll
@@ -0,0 +1,25 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; This test checks that basic blocks are reordered in SPIR-V so that dominators
+; are emitted ahead of their dominated blocks as required by the SPIR-V
+; specification.
+
+; CHECK-DAG: OpName %[[#ENTRY:]] "entry"
+; CHECK-DAG: OpName %[[#FOR_BODY137_LR_PH:]] "for.body137.lr.ph"
+; CHECK-DAG: OpName %[[#FOR_BODY:]] "for.body"
+
+; CHECK: %[[#ENTRY]] = OpLabel
+; CHECK: %[[#FOR_BODY]] = OpLabel
+; CHECK: %[[#FOR_BODY137_LR_PH]] = OpLabel
+
+define spir_kernel void @test(ptr addrspace(1) %arg, i1 %cond) {
+entry:
+ br label %for.body
+
+for.body137.lr.ph: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ br i1 %cond, label %for.body, label %for.body137.lr.ph
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_maximal_reconvergence/enable-maximal-reconvergence.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_maximal_reconvergence/enable-maximal-reconvergence.ll
new file mode 100644
index 0000000..105f4a4
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_maximal_reconvergence/enable-maximal-reconvergence.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-unknown-vulkan1.3-compute --spirv-ext=+SPV_KHR_maximal_reconvergence %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute --spirv-ext=+SPV_KHR_maximal_reconvergence %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpCapability Shader
+; CHECK: OpExtension "SPV_KHR_maximal_reconvergence"
+; CHECK-NOT: OpExecutionMode {{.*}} MaximallyReconvergesKHR
+; CHECK: OpExecutionMode [[main:%[0-9]+]] MaximallyReconvergesKHR
+; CHECK-NOT: OpExecutionMode {{.*}} MaximallyReconvergesKHR
+; CHECK: OpName [[main]] "main"
+define void @main() local_unnamed_addr #0 {
+entry:
+ ret void
+}
+
+define void @negative() local_unnamed_addr #1 {
+entry:
+ ret void
+}
+
+attributes #0 = { "enable-maximal-reconvergence"="true" "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/SPIRV/llvm-compiler-used.ll b/llvm/test/CodeGen/SPIRV/llvm-compiler-used.ll
new file mode 100644
index 0000000..ddc2585
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-compiler-used.ll
@@ -0,0 +1,19 @@
+; RUN: llc -verify-machineinstrs -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %}
+
+; Verify that llvm.compiler.used is not lowered.
+; CHECK: OpName %{{[0-9]+}} "unused"
+; CHECK-NOT: OpName %{{[0-9]+}} "llvm.compiler.used"
+
+; Check that the type of llvm.compiler.used is not emitted too.
+; CHECK-NOT: OpTypeArray
+
+@unused = private addrspace(3) global i32 0
+@llvm.compiler.used = appending addrspace(2) global [1 x ptr addrspace(4)] [ptr addrspace(4) addrspacecast (ptr addrspace(3) @unused to ptr addrspace(4))]
+
+define spir_func void @foo() {
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fake_use.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fake_use.ll
new file mode 100644
index 0000000..5370b51
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fake_use.ll
@@ -0,0 +1,13 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpCapability Addresses
+; CHECK-DAG: OpName %[[#]] "foo"
+
+declare void @llvm.fake.use(...)
+
+define spir_kernel void @foo(ptr addrspace(1) %a) {
+entry:
+ call void (...) @llvm.fake.use(ptr addrspace(1) %a)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchange_cl20.ll b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchange_cl20.ll
new file mode 100644
index 0000000..8357373
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchange_cl20.ll
@@ -0,0 +1,84 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.2-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-NOT: OpCapability Int64Atomics
+
+; CHECK-DAG: %[[#int:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#int8:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#DeviceScope:]] = OpConstant %[[#int]] 1
+; CHECK-DAG: %[[#SequentiallyConsistent_MS:]] = OpConstant %[[#int]] 16
+; CHECK-DAG: %[[#int_ptr:]] = OpTypePointer Generic %[[#int]]
+; CHECK-DAG: %[[#int_ptr8:]] = OpTypePointer Generic %[[#int8]]
+; CHECK-DAG: %[[#bool:]] = OpTypeBool
+
+define spir_func void @test(ptr addrspace(4) %object, ptr addrspace(4) %expected, i32 %desired) {
+
+; CHECK: %[[#object:]] = OpFunctionParameter %[[#int_ptr8]]
+; CHECK: %[[#expected:]] = OpFunctionParameter %[[#int_ptr8]]
+; CHECK: %[[#desired:]] = OpFunctionParameter %[[#int]]
+
+entry:
+ %object.addr = alloca ptr addrspace(4), align 4
+ %expected.addr = alloca ptr addrspace(4), align 4
+ %desired.addr = alloca i32, align 4
+ %strong_res = alloca i8, align 1
+ %res = alloca i8, align 1
+ %weak_res = alloca i8, align 1
+ store ptr addrspace(4) %object, ptr %object.addr, align 4
+ store ptr addrspace(4) %expected, ptr %expected.addr, align 4
+ store i32 %desired, ptr %desired.addr, align 4
+ %0 = load ptr addrspace(4), ptr %object.addr, align 4
+ %1 = load ptr addrspace(4), ptr %expected.addr, align 4
+ %2 = load i32, ptr %desired.addr, align 4
+
+; CHECK-DAG: OpStore %[[#object_addr:]] %[[#object]]
+; CHECK-DAG: OpStore %[[#expected_addr:]] %[[#expected]]
+; CHECK-DAG: OpStore %[[#desired_addr:]] %[[#desired]]
+
+; CHECK: %[[#Pointer:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#exp:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#Value:]] = OpLoad %[[#int]] %[[#desired_addr]]
+; CHECK: %[[#Comparator:]] = OpLoad %[[#int]] %[[#exp]]
+
+; CHECK: %[[#Result:]] = OpAtomicCompareExchange %[[#int]] %[[#]] %[[#DeviceScope]] %[[#SequentiallyConsistent_MS]] %[[#SequentiallyConsistent_MS]] %[[#Value]] %[[#Comparator]]
+ %call = call spir_func zeroext i1 @_Z30atomic_compare_exchange_strongPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4) %0, ptr addrspace(4) %1, i32 %2)
+
+; CHECK-NEXT: OpStore %[[#exp]] %[[#Result]]
+; CHECK-NEXT: %[[#CallRes:]] = OpIEqual %[[#bool]] %[[#Result]] %[[#Comparator]]
+; CHECK-NOT: %[[#Result]]
+
+ %frombool = zext i1 %call to i8
+ store i8 %frombool, ptr %strong_res, align 1
+ %3 = load i8, ptr %strong_res, align 1
+ %tobool = trunc i8 %3 to i1
+ %lnot = xor i1 %tobool, true
+ %frombool1 = zext i1 %lnot to i8
+ store i8 %frombool1, ptr %res, align 1
+ %4 = load ptr addrspace(4), ptr %object.addr, align 4
+ %5 = load ptr addrspace(4), ptr %expected.addr, align 4
+ %6 = load i32, ptr %desired.addr, align 4
+
+; CHECK: %[[#Pointer:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#exp:]] = OpLoad %[[#int_ptr]] %[[#]]
+; CHECK: %[[#Value:]] = OpLoad %[[#int]] %[[#desired_addr]]
+; CHECK: %[[#ComparatorWeak:]] = OpLoad %[[#int]] %[[#exp]]
+
+; CHECK: %[[#Result:]] = OpAtomicCompareExchangeWeak %[[#int]] %[[#]] %[[#DeviceScope]] %[[#SequentiallyConsistent_MS]] %[[#SequentiallyConsistent_MS]] %[[#Value]] %[[#ComparatorWeak]]
+ %call2 = call spir_func zeroext i1 @_Z28atomic_compare_exchange_weakPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4) %4, ptr addrspace(4) %5, i32 %6)
+
+; CHECK-NEXT: OpStore %[[#exp]] %[[#Result]]
+; CHECK-NEXT: %[[#CallRes:]] = OpIEqual %[[#bool]] %[[#Result]] %[[#ComparatorWeak]]
+; CHECK-NOT: %[[#Result]]
+
+ %frombool3 = zext i1 %call2 to i8
+ store i8 %frombool3, ptr %weak_res, align 1
+ %7 = load i8, ptr %weak_res, align 1
+ %tobool4 = trunc i8 %7 to i1
+ %lnot5 = xor i1 %tobool4, true
+ %frombool6 = zext i1 %lnot5 to i8
+ store i8 %frombool6, ptr %res, align 1
+ ret void
+}
+
+declare spir_func zeroext i1 @_Z30atomic_compare_exchange_strongPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4), ptr addrspace(4), i32) #1
+declare spir_func zeroext i1 @_Z28atomic_compare_exchange_weakPVU3AS4U7_AtomiciPU3AS4ii(ptr addrspace(4), ptr addrspace(4), i32) #1
diff --git a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
index c6ee804..07fbed9 100644
--- a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
@@ -90,7 +90,7 @@ define i32 @test_tbegin_nofloat4(i32 %pad, ptr %ptr) {
 ; CHECK: tbegin 0, 65292
 ; CHECK: ipm %r2
 ; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
 ; CHECK: mvhi 0(%r3), 0
 ; CHECK: br %r14
 %res = call i32 @llvm.s390.tbegin.nofloat(ptr null, i32 65292)
@@ -219,7 +219,7 @@ define i32 @test_tend2(i32 %pad, ptr %ptr) {
 ; CHECK: tend
 ; CHECK: ipm %r2
 ; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
 ; CHECK: mvhi 0(%r3), 0
 ; CHECK: br %r14
 %res = call i32 @llvm.s390.tend()
diff --git a/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll
new file mode 100644
index 0000000..6b8746e
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-01.ll
@@ -0,0 +1,738 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+; Test implementation of combining br_ccmask for flag output operand, and
+; optimizing ipm sequence using conditional branches.
+
+declare void @dummy()
+
+; Check a case where the cc is used as an integer.
+; Just (srl (ipm)) sequence without optimization.
+define i32 @test(ptr %a) {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ipm %r2
+; CHECK-NEXT: srl %r2, 28
+; CHECK-NEXT: br %r14
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ ret i32 %cc
+}
+
+; Test-1(f1_0_*). Test all 14 valid combinations, where cc is being used for
+; branching.
+
+; Check (cc == 0).
+define void @f1_0_eq_0(ptr %a) {
+; CHECK-LABEL: f1_0_eq_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jge dummy@PLT
+; CHECK-NEXT: .LBB1_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 0).
+define void @f1_0_ne_0(ptr %a) {
+; CHECK-LABEL: f1_0_ne_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgne dummy@PLT
+; CHECK-NEXT: .LBB2_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ugt i32 %cc, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1).
+define void @f1_0_eq_1(ptr %a) {
+; CHECK-LABEL: f1_0_eq_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgl dummy@PLT
+; CHECK-NEXT: .LBB3_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 1).
+define void @f1_0_ne_1(ptr %a) {
+; CHECK-LABEL: f1_0_ne_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB4_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ne i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 2).
+define void @f1_0_eq_2(ptr %a) {
+; CHECK-LABEL: f1_0_eq_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgh dummy@PLT
+; CHECK-NEXT: .LBB5_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 2).
+define void @f1_0_ne_2(ptr %a) {
+; CHECK-LABEL: f1_0_ne_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB6_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ne i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 3).
+define void @f1_0_eq_3(ptr %a) {
+; CHECK-LABEL: f1_0_eq_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgo dummy@PLT
+; CHECK-NEXT: .LBB7_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp eq i32 %cc, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc != 3).
+define void @f1_0_ne_3(ptr %a) {
+; CHECK-LABEL: f1_0_ne_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgno dummy@PLT
+; CHECK-NEXT: .LBB8_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ult i32 %cc, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|1).
+define void @f1_0_01(ptr %a) {
+; CHECK-LABEL: f1_0_01:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB9_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ult i32 %cc, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|2).
+define void @f1_0_02(ptr %a) {
+; CHECK-LABEL: f1_0_02:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jghe dummy@PLT
+; CHECK-NEXT: .LBB10_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 0|3).
+define void @f1_0_03(ptr %a) {
+; CHECK-LABEL: f1_0_03:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnlh dummy@PLT
+; CHECK-NEXT: .LBB11_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp0 = icmp ne i32 %cc, 0
+ %cmp3 = icmp ne i32 %cc, 3
+ %cmp.inv = and i1 %cmp0, %cmp3
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1|2).
+define void @f1_0_12(ptr %a) {
+; CHECK-LABEL: f1_0_12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB12_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq1 = icmp eq i32 %cc, 1
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %cmp = or i1 %cmpeq1, %cmpeq2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 1|3).
+define void @f1_0_13(ptr %a) {
+; CHECK-LABEL: f1_0_13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnhe dummy@PLT
+; CHECK-NEXT: .LBB13_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq1 = icmp eq i32 %cc, 1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp = or i1 %cmpeq1, %cmpeq3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check (cc == 2|3).
+define void @f1_0_23(ptr %a) {
+; CHECK-LABEL: f1_0_23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnle dummy@PLT
+; CHECK-NEXT: .LBB14_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmp = icmp ugt i32 %cc, 1
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Test-2(f1_1_*/f1_2_*/f1_3_*/f1_4_*).
+; Test Mixed patterns involving Binary Ops.
+
+; Check 'add' for (cc != 0).
+define void @f1_1_1(ptr %a) {
+; CHECK-LABEL: f1_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgne dummy@PLT
+; CHECK-NEXT: .LBB15_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cmp = icmp ult i32 %add, 3
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'add' for (cc == 1|2).
+define void @f1_1_2(ptr %a) {
+; CHECK-LABEL: f1_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB16_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cmp = icmp ult i32 %add, 2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'add' for (cc == 1|2).
+define void @f1_1_3(ptr %a) {
+; CHECK-LABEL: f1_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jglh dummy@PLT
+; CHECK-NEXT: .LBB17_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cmp.inv = icmp ult i32 %add, -2
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and' with one operand cc and other select_ccmask(cc !=1).
+define void @f1_2_1(ptr %a) {
+; CHECK-LABEL: f1_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB18_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp.inv = and i1 %cmpne3, %cmpne0
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and' with both operands select_ccmask(cc != 2).
+define void @f1_2_2(ptr %a) {
+; CHECK-LABEL: f1_2_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB19_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %ugt1 = icmp samesign ugt i32 %cc, 1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %and.cond.inv = and i1 %ugt1, %cmpne3
+ br i1 %and.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and/tm' for (cc == 0|2).
+define void @f1_2_3(ptr %a) {
+; CHECK-LABEL: f1_2_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jghe dummy@PLT
+; CHECK-NEXT: .LBB20_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'and/tm' for (cc == 1|3).
+define void @f1_2_4(ptr %a) {
+; CHECK-LABEL: f1_2_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnhe dummy@PLT
+; CHECK-NEXT: .LBB21_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %and = and i32 %cc, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'icmp' with one operand 'and' and other 'select_ccmask'(cc != 1).
+define void @f1_2_5(ptr %a) {
+; CHECK-LABEL: f1_2_5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB22_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp = xor i1 %cmpne3, %trunc
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check nested 'xor' cc with select_ccmask(cc != 1).
+define void @f1_3_1(ptr %a) {
+; CHECK-LABEL: f1_3_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB23_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %cmpeq0 = icmp eq i32 %cc, 0
+ %cmpeq2 = icmp eq i32 %cc, 2
+ %xor = xor i1 %cmpeq0, %cmpeq2
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cmp.inv = xor i1 %cmpne3, %xor
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask(cc !=1).
+define void @f1_3_2(ptr %a) {
+; CHECK-LABEL: f1_3_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnl dummy@PLT
+; CHECK-NEXT: .LBB24_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp.inv = xor i1 %cmpeq3, %trunc
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check branching on 'tm' and 'xor' with one operand cc and the other
+; select_ccmask(cc !=2).
+define void @f1_3_3(ptr %a) {
+; CHECK-LABEL: f1_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgnh dummy@PLT
+; CHECK-NEXT: .LBB25_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cmp.cond.inv = xor i1 %cmpne0, %trunc
+ br i1 %cmp.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' where both operands are select_ccmask, one with TM and the other
+; with ICMP(cc == 1).
+define void @f1_4_1(ptr %a) {
+; CHECK-LABEL: f1_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgl dummy@PLT
+; CHECK-NEXT: .LBB26_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cmp.cond.inv = or i1 %cmpeq3, %cmpeq0
+ br i1 %cmp.cond.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' for (cc == 0|1).
+define void @f1_4_2(ptr %a) {
+; CHECK-LABEL: f1_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB27_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cmp.inv = icmp samesign ugt i32 %or, -3
+ br i1 %cmp.inv, label %exit, label %branch
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
+; Check 'or' for (cc == 0|1).
+define void @f1_4_3(ptr %a) {
+; CHECK-LABEL: f1_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jgle dummy@PLT
+; CHECK-NEXT: .LBB28_1: # %exit
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cmp = icmp samesign ult i32 %or, -2
+ br i1 %cmp, label %branch, label %exit
+branch:
+ tail call void @dummy()
+ br label %exit
+exit:
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll
new file mode 100644
index 0000000..b9b9a4b
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/inline-asm-flag-output-02.ll
@@ -0,0 +1,1665 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+; Test implementation of combining select_ccmask for flag output operand and
+; optimizing ipm sequence using conditional branches.
+
+; Test-1(f2_0_*): Both TrueVal and FalseVal non-const(14-valid CCMask).
+
+; Check (cc == 0).
+define i64 @f2_0_eq_0(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ber %r14
+; CHECK-NEXT: .LBB0_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 0).
+define i64 @f2_0_ne_0(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB1_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ugt i32 %cc, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 1).
+define i64 @f2_0_eq_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB2_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 1).
+define i64 @f2_0_ne_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB3_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ne i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 2).
+define i64 @f2_0_eq_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB4_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 2).
+define i64 @f2_0_ne_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnhr %r14
+; CHECK-NEXT: .LBB5_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ne i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 3).
+define i64 @f2_0_eq_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_eq_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bor %r14
+; CHECK-NEXT: .LBB6_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp eq i32 %cc, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc != 3).
+define i64 @f2_0_ne_3(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_ne_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnor %r14
+; CHECK-NEXT: .LBB7_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ult i32 %cc, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|1).
+define i64 @f2_0_01(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_01:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB8_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ult i32 %cc, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|2).
+define i64 @f2_0_02(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_02:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB9_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %and = and i32 %cc, 1
+ %cond = icmp eq i32 %and, 0
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check (cc == 0|3).
+define i64 @f2_0_03(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_03:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB10_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cmp0 = icmp ne i32 %cc, 0
+ %cmp3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmp0, %cmp3
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 1|2).
+define i64 @f2_0_12(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB11_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 1|3).
+define i64 @f2_0_13(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_0_13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bher %r14
+; CHECK-NEXT: .LBB12_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %and = and i32 %cc, 1
+ %cond.inv = icmp eq i32 %and, 0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check (cc == 2|3).
+define i64 @f2_0_23(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_0_23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB13_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %cmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %cmp)
+ %cond = icmp ugt i32 %cc, 1
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Test-2(f2_1_*/f2_2_*/f2_3_*/f2_4_*).
+; Both TrueVal and FalseVal are non-const with mixed patterns involving
+; Binary Ops.
+
+; Check 'add' for (cc != 0).
+define i64 @f2_1_1(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_1_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bner %r14
+; CHECK-NEXT: .LBB14_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 3
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f2_1_2(i64 %x, i64 %y, ptr %a) {
+; CHECK-LABEL: f2_1_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blhr %r14
+; CHECK-NEXT: .LBB15_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -1
+ %cond = icmp ult i32 %add, 2
+ %res = select i1 %cond, i64 %x, i64 %y
+ ret i64 %res
+}
+
+; Check 'add' for (cc == 1|2).
+define i64 @f2_1_3(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_1_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlhr %r14
+; CHECK-NEXT: .LBB16_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %add = add nsw i32 %cc, -3
+ %cond.inv = icmp ult i32 %add, -2
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with one operand cc and other select_ccmask(cc !=1).
+define i64 @f2_2_1(i64 %y, i64 %x, ptr %a) {
+; CHECK-LABEL: f2_2_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r4), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: blr %r14
+; CHECK-NEXT: .LBB17_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpne0 = icmp ne i32 %andcc, 0
+ %cmpne3 = icmp ne i32 %cc, 3
+ %cond.inv = and i1 %cmpne3, %cmpne0
+ %res = select i1 %cond.inv, i64 %y, i64 %x
+ ret i64 %res
+}
+
+; Check 'and' with both operands select_ccmask(cc != 2).
+define i64 @f2_2_2(i64 %y, i64 %x, ptr %a) { +; CHECK-LABEL: f2_2_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bhr %r14 +; CHECK-NEXT: .LBB18_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %ugt1 = icmp samesign ugt i32 %cc, 1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = and i1 %ugt1, %cmpne3 + %res = select i1 %cond.inv, i64 %y, i64 %x + ret i64 %res +} + +; Check 'and/tm' for (cc == 0|2). +define i64 @f2_2_3(i64 %x, i64 %y, ptr %a) { +; CHECK-LABEL: f2_2_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB19_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond = icmp eq i32 %and, 0 + %res = select i1 %cond, i64 %x, i64 %y + ret i64 %res +} + +; Check 'and/tm' for (cc == 1|3). +define i64 @f2_2_4(i64 %y, i64 %x, ptr %a) { +; CHECK-LABEL: f2_2_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB20_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond.inv = icmp eq i32 %and, 0 + %res = select i1 %cond.inv, i64 %y, i64 %x + ret i64 %res +} + +; Check 'icmp' with one operand 'and' and other 'select_ccmask'(cc != 1). +define i64 @f2_2_5(i64 %x, i64 %y, ptr %a) { +; CHECK-LABEL: f2_2_5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bnlr %r14 +; CHECK-NEXT: .LBB21_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond = xor i1 %cmpne3, %trunc + %res = select i1 %cond, i64 %x, i64 %y + ret i64 %res +} + + +; Check nested 'xor' cc with select_ccmask(cc != 1). 
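+; (cc == 0) xor (cc == 2) holds for cc in {0,2}; xor'ing that with (cc != 3)
+; leaves the inverted condition true only for cc == 1.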
+define i64 @f2_3_1(i64 %y, i64 %x, ptr %a) { +; CHECK-LABEL: f2_3_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB22_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %cmpeq0 = icmp eq i32 %cc, 0 + %cmpeq2 = icmp eq i32 %cc, 2 + %xor = xor i1 %cmpeq0, %cmpeq2 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = xor i1 %cmpne3, %xor + %res = select i1 %cond.inv, i64 %y, i64 %x + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=1). +define i64 @f2_3_2(i64 %y, i64 %x, ptr %a) { +; CHECK-LABEL: f2_3_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB23_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpeq3 = icmp eq i32 %cc, 3 + %cond.inv = xor i1 %cmpeq3, %trunc + %res = select i1 %cond.inv, i64 %y, i64 %x + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=2). +define i64 @f2_3_3(i64 %y, i64 %x, ptr %a) { +; CHECK-LABEL: f2_3_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bhr %r14 +; CHECK-NEXT: .LBB24_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpne0 = icmp ne i32 %cc, 0 + %cond.inv = xor i1 %cmpne0, %trunc + %res = select i1 %cond.inv, i64 %y, i64 %x + ret i64 %res +} + +; Check 'or' with both operands select_ccmask with TM and ICMP(cc == 1). +define i64 @f2_4_1(i64 %y, i64 %x, ptr %a) { +; CHECK-LABEL: f2_4_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bnlr %r14 +; CHECK-NEXT: .LBB25_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %andcc = and i32 %cc, 1 + %cmpeq0 = icmp eq i32 %andcc, 0 + %cmpeq3 = icmp eq i32 %cc, 3 + %cond.inv = or i1 %cmpeq3, %cmpeq0 + %res = select i1 %cond.inv, i64 %y, i64 %x + ret i64 %res +} + +; Check 'or' for (cc == 0|1). 
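+; 'or disjoint %cc, -4' acts as an add, mapping cc in [0,4) to [-4,-1]; the
+; unsigned compare against -3 then holds exactly for cc == 2|3 (inverted).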
+define i64 @f2_4_2(i64 %y, i64 %x, ptr %a) { +; CHECK-LABEL: f2_4_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bnler %r14 +; CHECK-NEXT: .LBB26_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %or = or disjoint i32 %cc, -4 + %cond.inv = icmp samesign ugt i32 %or, -3 + %res = select i1 %cond.inv, i64 %y, i64 %x + ret i64 %res +} + +; Check 'or' for (cc == 0|1). +define i64 @f2_4_3(i64 %x, i64 %y, ptr %a) { +; CHECK-LABEL: f2_4_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r4), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bler %r14 +; CHECK-NEXT: .LBB27_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %or = or disjoint i32 %cc, -4 + %cond = icmp samesign ult i32 %or, -2 + %res = select i1 %cond, i64 %x, i64 %y + ret i64 %res +} + +; Test-3(f3_1_*/f3_2_*/f3_3_*/f3_4_*). +; TrueVal is non-const and FalseVal is const with mixed patterns involving +; Binary Ops. + +; Check 'add' for (cc != 0). +define i64 @f3_1_1(i64 %x, ptr %a) { +; CHECK-LABEL: f3_1_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bner %r14 +; CHECK-NEXT: .LBB28_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -1 + %cond = icmp ult i32 %add, 3 + %res = select i1 %cond, i64 %x, i64 5 + ret i64 %res +} + +; Check 'add' for (cc == 1|2). +define i64 @f3_1_2(i64 %x, ptr %a) { +; CHECK-LABEL: f3_1_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: blhr %r14 +; CHECK-NEXT: .LBB29_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -1 + %cond = icmp ult i32 %add, 2 + %res = select i1 %cond, i64 %x, i64 5 + ret i64 %res +} + +; Check 'add' for (cc == 1|2). +define i64 @f3_1_3(ptr %a, i64 %x) { +; CHECK-LABEL: f3_1_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bnlhr %r14 +; CHECK-NEXT: .LBB30_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -3 + %cond.inv = icmp ult i32 %add, -2 + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check 'and' with one operand cc and other select_ccmask(cc !=1). 
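+; ((cc & 1) != 0) & (cc != 3) is true only for cc == 1; it selects the
+; constant arm, so the underlying condition is cc != 1.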
+define i64 @f3_2_1(ptr %a, i64 %x) { +; CHECK-LABEL: f3_2_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB31_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %andcc = and i32 %cc, 1 + %cmpne0 = icmp ne i32 %andcc, 0 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = and i1 %cmpne3, %cmpne0 + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check 'and' with both operands select_ccmask(cc != 2). +define i64 @f3_2_2(ptr %a, i64 %x) { +; CHECK-LABEL: f3_2_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bhr %r14 +; CHECK-NEXT: .LBB32_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %ugt1 = icmp samesign ugt i32 %cc, 1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = and i1 %ugt1, %cmpne3 + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check 'and/tm' for (cc == 0|2). +define i64 @f3_2_3(i64 %x, ptr %a) { +; CHECK-LABEL: f3_2_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB33_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond = icmp eq i32 %and, 0 + %res = select i1 %cond, i64 %x, i64 5 + ret i64 %res +} + +; Check 'and/tm' for (cc == 1|3). +define i64 @f3_2_4(ptr %a, i64 %x) { +; CHECK-LABEL: f3_2_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB34_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond.inv = icmp eq i32 %and, 0 + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check 'icmp' with one operand 'and' and other 'select_ccmask'(cc != 1). 
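+; (cc != 3) xor (cc & 1) evaluates true for cc in {0,2,3}, i.e. cc != 1.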
+define i64 @f3_2_5(i64 %x, ptr %a) { +; CHECK-LABEL: f3_2_5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bnlr %r14 +; CHECK-NEXT: .LBB35_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond = xor i1 %cmpne3, %trunc + %res = select i1 %cond, i64 %x, i64 5 + ret i64 %res +} + + +; Check nested 'xor' cc with select_ccmask(cc != 1). +define i64 @f3_3_1(ptr %a, i64 %x) { +; CHECK-LABEL: f3_3_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB36_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %cmpeq0 = icmp eq i32 %cc, 0 + %cmpeq2 = icmp eq i32 %cc, 2 + %xor = xor i1 %cmpeq0, %cmpeq2 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = xor i1 %cmpne3, %xor + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=1). +define i64 @f3_3_2(ptr %a, i64 %x) { +; CHECK-LABEL: f3_3_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB37_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpeq3 = icmp eq i32 %cc, 3 + %cond.inv = xor i1 %cmpeq3, %trunc + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=2). +define i64 @f3_3_3(ptr %a, i64 %x) { +; CHECK-LABEL: f3_3_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bhr %r14 +; CHECK-NEXT: .LBB38_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpne0 = icmp ne i32 %cc, 0 + %cond.inv = xor i1 %cmpne0, %trunc + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check 'or' with both operands select_ccmask with TM and ICMP(cc == 1). 
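+; ((cc & 1) == 0) | (cc == 3) covers cc in {0,2,3}, so only cc == 1 takes the
+; non-constant arm.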
+define i64 @f3_4_1(ptr %a, i64 %x) { +; CHECK-LABEL: f3_4_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bnlr %r14 +; CHECK-NEXT: .LBB39_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %andcc = and i32 %cc, 1 + %cmpeq0 = icmp eq i32 %andcc, 0 + %cmpeq3 = icmp eq i32 %cc, 3 + %cond.inv = or i1 %cmpeq3, %cmpeq0 + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check 'or' for (cc == 0|1). +define i64 @f3_4_2(ptr %a, i64 %x) { +; CHECK-LABEL: f3_4_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bnler %r14 +; CHECK-NEXT: .LBB40_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %or = or disjoint i32 %cc, -4 + %cond.inv = icmp samesign ugt i32 %or, -3 + %res = select i1 %cond.inv, i64 5, i64 %x + ret i64 %res +} + +; Check 'or' for (cc == 0|1). +define i64 @f3_4_3(i64 %x, ptr %a) { +; CHECK-LABEL: f3_4_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bler %r14 +; CHECK-NEXT: .LBB41_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %or = or disjoint i32 %cc, -4 + %cond = icmp samesign ult i32 %or, -2 + %res = select i1 %cond, i64 %x, i64 5 + ret i64 %res +} + + +; Test-4(f4_1_*/f4_2_*/f4_3_*/f4_4_*). +; TrueVal is const and FalseVal is non-const with mixed patterns involving +; Binary Ops. + +; Check 'add' for (cc != 0). +define i64 @f4_1_1(ptr %a, i64 %y) { +; CHECK-LABEL: f4_1_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bner %r14 +; CHECK-NEXT: .LBB42_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -1 + %cond = icmp ult i32 %add, 3 + %res = select i1 %cond, i64 15, i64 %y + ret i64 %res +} + +; Check 'add' for (cc == 1|2). 
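+; cc - 1 is unsigned-less-than 2 exactly for cc == 1|2 (cc == 0 wraps to -1).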
+define i64 @f4_1_2(ptr %a, i64 %y) { +; CHECK-LABEL: f4_1_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: blhr %r14 +; CHECK-NEXT: .LBB43_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -1 + %cond = icmp ult i32 %add, 2 + %res = select i1 %cond, i64 15, i64 %y + ret i64 %res +} + +; Check 'add' for (cc == 1|2). +define i64 @f4_1_3(i64 %y, ptr %a) { +; CHECK-LABEL: f4_1_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bnlhr %r14 +; CHECK-NEXT: .LBB44_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -3 + %cond.inv = icmp ult i32 %add, -2 + %res = select i1 %cond.inv, i64 %y, i64 15 + ret i64 %res +} + +; Check 'and' with one operand cc and other select_ccmask(cc !=1). +define i64 @f4_2_1(i64 %y, ptr %a) { +; CHECK-LABEL: f4_2_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB45_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %andcc = and i32 %cc, 1 + %cmpne0 = icmp ne i32 %andcc, 0 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = and i1 %cmpne3, %cmpne0 + %res = select i1 %cond.inv, i64 %y, i64 15 + ret i64 %res +} + +; Check 'and' with both operands select_ccmask(cc != 2). +define i64 @f4_2_2(i64 %y, ptr %a) { +; CHECK-LABEL: f4_2_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bhr %r14 +; CHECK-NEXT: .LBB46_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %ugt1 = icmp samesign ugt i32 %cc, 1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = and i1 %ugt1, %cmpne3 + %res = select i1 %cond.inv, i64 %y, i64 15 + ret i64 %res +} + +; Check 'and/tm' for (cc == 0|2). +define i64 @f4_2_3(ptr %a, i64 %y) { +; CHECK-LABEL: f4_2_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB47_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond = icmp eq i32 %and, 0 + %res = select i1 %cond, i64 15, i64 %y + ret i64 %res +} + +; Check 'and/tm' for (cc == 1|3). 
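+; (cc & 1) == 0 is the inverted parity test; the select condition proper is
+; odd cc, i.e. cc == 1|3.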
+define i64 @f4_2_4(i64 %y, ptr %a) { +; CHECK-LABEL: f4_2_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB48_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond.inv = icmp eq i32 %and, 0 + %res = select i1 %cond.inv, i64 %y, i64 15 + ret i64 %res +} + +; Check 'icmp' with one operand 'and' and other 'select_ccmask'(cc != 1). +define i64 @f4_2_5(ptr %a, i64 %y) { +; CHECK-LABEL: f4_2_5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bnlr %r14 +; CHECK-NEXT: .LBB49_1: # %entry +; CHECK-NEXT: lgr %r2, %r3 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond = xor i1 %cmpne3, %trunc + %res = select i1 %cond, i64 15, i64 %y + ret i64 %res +} + + +; Check nested 'xor' cc with select_ccmask(cc != 1). +define i64 @f4_3_1(i64 %y, ptr %a) { +; CHECK-LABEL: f4_3_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB50_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %cmpeq0 = icmp eq i32 %cc, 0 + %cmpeq2 = icmp eq i32 %cc, 2 + %xor = xor i1 %cmpeq0, %cmpeq2 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = xor i1 %cmpne3, %xor + %res = select i1 %cond.inv, i64 %y, i64 15 + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=1). +define i64 @f4_3_2(i64 %y, ptr %a) { +; CHECK-LABEL: f4_3_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r3), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB51_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpeq3 = icmp eq i32 %cc, 3 + %cond.inv = xor i1 %cmpeq3, %trunc + %res = select i1 %cond.inv, i64 %y, i64 15 + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=2). 
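+; (cc != 0) xor (cc & 1) is true only for cc == 2, so the inverted select
+; implements cc != 2.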
+define i64 @f4_3_3(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_3_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bhr %r14
+; CHECK-NEXT: .LBB52_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %trunc = trunc i32 %cc to i1
+ %cmpne0 = icmp ne i32 %cc, 0
+ %cond.inv = xor i1 %cmpne0, %trunc
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' with both operands select_ccmask with TM and ICMP(cc == 1).
+define i64 @f4_4_1(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_4_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnlr %r14
+; CHECK-NEXT: .LBB53_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %andcc = and i32 %cc, 1
+ %cmpeq0 = icmp eq i32 %andcc, 0
+ %cmpeq3 = icmp eq i32 %cc, 3
+ %cond.inv = or i1 %cmpeq3, %cmpeq0
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f4_4_2(i64 %y, ptr %a) {
+; CHECK-LABEL: f4_4_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r3), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: bnler %r14
+; CHECK-NEXT: .LBB54_1: # %entry
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond.inv = icmp samesign ugt i32 %or, -3
+ %res = select i1 %cond.inv, i64 %y, i64 15
+ ret i64 %res
+}
+
+; Check 'or' for (cc == 0|1).
+define i64 @f4_4_3(ptr %a, i64 %y) {
+; CHECK-LABEL: f4_4_3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: alsi 0(%r2), -1
+; CHECK-EMPTY:
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: lghi %r2, 15
+; CHECK-NEXT: bler %r14
+; CHECK-NEXT: .LBB55_1: # %entry
+; CHECK-NEXT: lgr %r2, %r3
+; CHECK-NEXT: br %r14
+entry:
+ %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a)
+ %tmp = icmp ult i32 %cc, 4
+ tail call void @llvm.assume(i1 %tmp)
+ %or = or disjoint i32 %cc, -4
+ %cond = icmp samesign ult i32 %or, -2
+ %res = select i1 %cond, i64 15, i64 %y
+ ret i64 %res
+}
+
+; Test-5(f5_1_*/f5_2_*/f5_3_*/f5_4_*).
+; Both TrueVal and FalseVal are const with mixed patterns involving
+; Binary Ops.
+
+
+; Check 'add' for (cc != 0).
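+; cc - 1 wraps to -1 for cc == 0, so 'icmp ult %add, 3' holds exactly for
+; cc == 1|2|3.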
+define i64 @f5_1_1(ptr %a) { +; CHECK-LABEL: f5_1_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bner %r14 +; CHECK-NEXT: .LBB56_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -1 + %cond = icmp ult i32 %add, 3 + %res = select i1 %cond, i64 15, i64 5 + ret i64 %res +} + +; Check 'add' for (cc == 1|2). +define i64 @f5_1_2(ptr %a) { +; CHECK-LABEL: f5_1_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: blhr %r14 +; CHECK-NEXT: .LBB57_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -1 + %cond = icmp ult i32 %add, 2 + %res = select i1 %cond, i64 15, i64 5 + ret i64 %res +} + +; Check 'add' for (cc == 1|2). +define i64 @f5_1_3(ptr %a) { +; CHECK-LABEL: f5_1_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bnlhr %r14 +; CHECK-NEXT: .LBB58_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %add = add nsw i32 %cc, -3 + %cond.inv = icmp ult i32 %add, -2 + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check 'and' with one operand cc and other select_ccmask(cc !=1). +define i64 @f5_2_1(ptr %a) { +; CHECK-LABEL: f5_2_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB59_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %andcc = and i32 %cc, 1 + %cmpne0 = icmp ne i32 %andcc, 0 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = and i1 %cmpne3, %cmpne0 + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check 'and' with both operands select_ccmask(cc != 2). +define i64 @f5_2_2(ptr %a) { +; CHECK-LABEL: f5_2_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bhr %r14 +; CHECK-NEXT: .LBB60_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %ugt1 = icmp samesign ugt i32 %cc, 1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = and i1 %ugt1, %cmpne3 + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check 'and/tm' for (cc == 0|2). 
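+; (cc & 1) == 0 picks the even values, cc == 0|2.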
+define i64 @f5_2_3(ptr %a) { +; CHECK-LABEL: f5_2_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB61_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond = icmp eq i32 %and, 0 + %res = select i1 %cond, i64 15, i64 5 + ret i64 %res +} + +; Check 'and/tm' for (cc == 1|3). +define i64 @f5_2_4(ptr %a) { +; CHECK-LABEL: f5_2_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB62_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %and = and i32 %cc, 1 + %cond.inv = icmp eq i32 %and, 0 + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check 'icmp' with one operand 'and' and other 'select_ccmask'(cc != 1). +define i64 @f5_2_5(ptr %a) { +; CHECK-LABEL: f5_2_5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bnlr %r14 +; CHECK-NEXT: .LBB63_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpne3 = icmp ne i32 %cc, 3 + %cond = xor i1 %cmpne3, %trunc + %res = select i1 %cond, i64 15, i64 5 + ret i64 %res +} + + +; Check nested 'xor' cc with select_ccmask(cc != 1). +define i64 @f5_3_1(ptr %a) { +; CHECK-LABEL: f5_3_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB64_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %cmpeq0 = icmp eq i32 %cc, 0 + %cmpeq2 = icmp eq i32 %cc, 2 + %xor = xor i1 %cmpeq0, %cmpeq2 + %cmpne3 = icmp ne i32 %cc, 3 + %cond.inv = xor i1 %cmpne3, %xor + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=1). 
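+; (cc == 3) xor (cc & 1) is true only for cc == 1, so the inverted select
+; implements cc != 1.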
+define i64 @f5_3_2(ptr %a) { +; CHECK-LABEL: f5_3_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: blr %r14 +; CHECK-NEXT: .LBB65_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpeq3 = icmp eq i32 %cc, 3 + %cond.inv = xor i1 %cmpeq3, %trunc + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check branching on 'tm' and 'xor' with one operand cc and the other +; select_ccmask(cc !=2). +define i64 @f5_3_3(ptr %a) { +; CHECK-LABEL: f5_3_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bhr %r14 +; CHECK-NEXT: .LBB66_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %trunc = trunc i32 %cc to i1 + %cmpne0 = icmp ne i32 %cc, 0 + %cond.inv = xor i1 %cmpne0, %trunc + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check 'or' with both operands select_ccmask with TM and ICMP(cc == 1). +define i64 @f5_4_1(ptr %a) { +; CHECK-LABEL: f5_4_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bnlr %r14 +; CHECK-NEXT: .LBB67_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %andcc = and i32 %cc, 1 + %cmpeq0 = icmp eq i32 %andcc, 0 + %cmpeq3 = icmp eq i32 %cc, 3 + %cond.inv = or i1 %cmpeq3, %cmpeq0 + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check 'or' for (cc == 0|1). +define i64 @f5_4_2(ptr %a) { +; CHECK-LABEL: f5_4_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: bnler %r14 +; CHECK-NEXT: .LBB68_1: # %entry +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %or = or disjoint i32 %cc, -4 + %cond.inv = icmp samesign ugt i32 %or, -3 + %res = select i1 %cond.inv, i64 5, i64 15 + ret i64 %res +} + +; Check 'or' for (cc == 0|1). 
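+; After 'or disjoint %cc, -4', 'icmp samesign ult %or, -2' holds for
+; cc == 0|1 directly, with no inversion needed.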
+define i64 @f5_4_3(ptr %a) { +; CHECK-LABEL: f5_4_3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bler %r14 +; CHECK-NEXT: .LBB69_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %tmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %tmp) + %or = or disjoint i32 %cc, -4 + %cond = icmp samesign ult i32 %or, -2 + %res = select i1 %cond, i64 15, i64 5 + ret i64 %res +} + +; Nested select_ccmask with TrueVal and FalseVal swapped with each other. +define i64 @f6_1(ptr %a) { +; CHECK-LABEL: f6_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: alsi 0(%r2), -1 +; CHECK-EMPTY: +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lghi %r2, 15 +; CHECK-NEXT: bher %r14 +; CHECK-NEXT: .LBB70_1: # %entry +; CHECK-NEXT: lghi %r2, 5 +; CHECK-NEXT: br %r14 +entry: + %cc = tail call i32 asm sideeffect "alsi $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr elementtype(i32) %a, ptr elementtype(i32) %a) + %cmp = icmp ult i32 %cc, 4 + tail call void @llvm.assume(i1 %cmp) + %andcc = and i32 %cc, 1 + %cmpeq0 = icmp eq i32 %andcc, 0 + %cmpeq3 = icmp eq i32 %cc, 3 + %select = select i1 %cmpeq3, i64 5, i64 15 + %res = select i1 %cmpeq0, i64 %select, i64 5 + ret i64 %res +} + diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll index c2b4494..11e7e5c 100644 --- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll @@ -1,16 +1,41 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve,+lob %s -S -o - | FileCheck %s -; CHECK-LABEL: mul_v16i8 -; CHECK-NOT: %num.elements = add i32 %trip.count.minus.1, 1 -; CHECK: vector.body: -; CHECK: %index = phi i32 -; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <16 x i1> @llvm.arm.mve.vctp8(i32 [[ELEMS]]) -; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 16 -; CHECK: [[LD0:%[^ ]+]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr {{.*}}, i32 4, <16 x i1> [[VCTP]], <16 x i8> undef) -; CHECK: [[LD1:%[^ ]+]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr {{.*}}, i32 4, <16 x i1> [[VCTP]], <16 x i8> undef) -; CHECK: tail call void @llvm.masked.store.v16i8.p0(<16 x i8> {{.*}}, ptr {{.*}}, i32 4, <16 x i1> [[VCTP]]) define dso_local arm_aapcs_vfpcc void @mul_v16i8(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define dso_local arm_aapcs_vfpcc void @mul_v16i8( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 15 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 4 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 4 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -16 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 4 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label 
%[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.vctp8(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 16 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP]], i32 4, <16 x i1> [[TMP1]], <16 x i8> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP3]], i32 4, <16 x i1> [[TMP1]], <16 x i8> undef) +; CHECK-NEXT: [[MUL:%.*]] = mul nsw <16 x i8> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v16i8.p0(<16 x i8> [[MUL]], ptr [[TMP6]], i32 4, <16 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 16 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 15 @@ -45,17 +70,41 @@ for.cond.cleanup: ; preds = %vector.body, %entry ret void } -; CHECK-LABEL: mul_v8i16 -; CHECK-NOT: %num.elements = add i32 %trip.count.minus.1, 1 -; CHECK: vector.body: -; CHECK: %index = phi i32 -; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[ELEMS]]) -; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 8 -; CHECK: [[LD0:%[^ ]+]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef) -; CHECK: [[LD1:%[^ ]+]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef) -; CHECK: tail call void @llvm.masked.store.v8i16.p0(<8 x i16> {{.*}}, ptr {{.*}}, i32 4, <8 x i1> [[VCTP]]) define dso_local arm_aapcs_vfpcc void @mul_v8i16(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define dso_local arm_aapcs_vfpcc void @mul_v8i16( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 7 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 3 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 3 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -8 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 3 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = 
call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 8 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP]], i32 4, <8 x i1> [[TMP1]], <8 x i16> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP1]], <8 x i16> undef) +; CHECK-NEXT: [[MUL:%.*]] = mul nsw <8 x i16> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v8i16.p0(<8 x i16> [[MUL]], ptr [[TMP6]], i32 4, <8 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 7 @@ -90,16 +139,41 @@ for.cond.cleanup: ; preds = %vector.body, %entry ret void } -; CHECK-LABEL: mul_v4i32 -; CHECK-NOT: %num.elements = add i32 %trip.count.minus.1, 1 -; CHECK: vector.body: -; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]]) -; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4 -; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef) -; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef) -; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> [[VCTP]]) define dso_local arm_aapcs_vfpcc void @mul_v4i32(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define dso_local arm_aapcs_vfpcc void @mul_v4i32( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 2 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 2 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; 
CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 4 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP3]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[MUL:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> [[MUL]], ptr [[TMP6]], i32 4, <4 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 3 @@ -134,17 +208,47 @@ for.cond.cleanup: ; preds = %vector.body, %entry ret void } -; CHECK-LABEL: split_vector -; CHECK-NOT: %num.elements = add i32 %trip.count.minus.1, 1 -; CHECK: vector.body: -; CHECK: %index = phi i32 -; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]]) -; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4 -; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef) -; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef) -; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> [[VCTP]]) define dso_local arm_aapcs_vfpcc void @split_vector(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define dso_local arm_aapcs_vfpcc void @split_vector( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 2 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 2 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] 
] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 4 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[EXTRACT_1_LOW:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> undef, <2 x i32> <i32 0, i32 2> +; CHECK-NEXT: [[EXTRACT_1_HIGH:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> undef, <2 x i32> <i32 1, i32 3> +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP3]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[EXTRACT_2_LOW:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_LOAD2]], <4 x i32> undef, <2 x i32> <i32 0, i32 2> +; CHECK-NEXT: [[EXTRACT_2_HIGH:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_LOAD2]], <4 x i32> undef, <2 x i32> <i32 1, i32 3> +; CHECK-NEXT: [[MUL:%.*]] = mul nsw <2 x i32> [[EXTRACT_1_LOW]], [[EXTRACT_2_LOW]] +; CHECK-NEXT: [[SUB:%.*]] = sub nsw <2 x i32> [[EXTRACT_1_HIGH]], [[EXTRACT_2_HIGH]] +; CHECK-NEXT: [[COMBINE:%.*]] = shufflevector <2 x i32> [[MUL]], <2 x i32> [[SUB]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> [[COMBINE]], ptr [[TMP6]], i32 4, <4 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 3 @@ -186,14 +290,48 @@ for.cond.cleanup: ; preds = %vector.body, %entry } ; One of the loads now uses ult predicate. 
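+; Tail predication still converts the loop, but the mismatched load keeps its
+; own 'ult' mask (WRONG below) while the other load and the store use the
+; vctp-generated mask.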
-; CHECK-LABEL: mismatch_load_pred -; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]]) -; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4 -; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef) -; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> %wrong, <4 x i32> undef) -; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> [[VCTP]]) define dso_local arm_aapcs_vfpcc void @mismatch_load_pred(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define dso_local arm_aapcs_vfpcc void @mismatch_load_pred( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 2 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 2 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = add i32 [[N]], -1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <4 x i32> undef, i32 [[TRIP_COUNT_MINUS_1]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT10]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 4 +; CHECK-NEXT: [[WRONG:%.*]] = icmp ult <4 x i32> [[INDUCTION]], [[BROADCAST_SPLAT11]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP3]], i32 4, <4 x i1> [[WRONG]], <4 x i32> undef) +; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD12]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void 
@llvm.masked.store.v4i32.p0(<4 x i32> [[TMP5]], ptr [[TMP6]], i32 4, <4 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 3 @@ -236,17 +374,48 @@ for.cond.cleanup: ; preds = %vector.body, %entry } ; The store now uses ult predicate. -; CHECK-LABEL: mismatch_store_pred -; CHECK-NOT: %num.elements = add i32 %trip.count.minus.1, 1 -; CHECK: vector.body: -; CHECK: %index = phi i32 -; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]]) -; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4 -; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef) -; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef) -; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %wrong) define dso_local arm_aapcs_vfpcc void @mismatch_store_pred(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define dso_local arm_aapcs_vfpcc void @mismatch_store_pred( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 2 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 2 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = add i32 [[N]], -1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <4 x i32> undef, i32 [[TRIP_COUNT_MINUS_1]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT10]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = 
sub i32 [[TMP0]], 4 +; CHECK-NEXT: [[WRONG:%.*]] = icmp ult <4 x i32> [[INDUCTION]], [[BROADCAST_SPLAT11]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP3]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD12]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP5]], ptr [[TMP6]], i32 4, <4 x i1> [[WRONG]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 3 @@ -294,14 +463,72 @@ for.cond.cleanup: ; preds = %vector.body, %entry ; ; Step value 16 doesn't match vector width 4 ; -; CHECK-LABEL: interleave4 -; CHECK: vector.body: -; CHECK: %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N) -; CHECK: %active.lane.mask{{.*}} = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v7, i32 %N) -; CHECK: %active.lane.mask{{.*}} = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v8, i32 %N) -; CHECK: %active.lane.mask{{.*}} = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v9, i32 %N) -; define dso_local void @interleave4(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: define dso_local void @interleave4( +; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias readonly captures(none) [[C:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N]], 0 +; CHECK-NEXT: [[V0:%.*]] = add i32 [[N]], 15 +; CHECK-NEXT: [[V1:%.*]] = lshr i32 [[V0]], 4 +; CHECK-NEXT: [[V2:%.*]] = shl nuw i32 [[V1]], 4 +; CHECK-NEXT: [[V3:%.*]] = add i32 [[V2]], -16 +; CHECK-NEXT: [[V4:%.*]] = lshr i32 [[V3]], 4 +; CHECK-NEXT: [[V5:%.*]] = add nuw nsw i32 [[V4]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[VECTOR_PH:.*]], label %[[FOR_COND_CLEANUP:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, ptr [[A]], i32 8 +; CHECK-NEXT: [[SCEVGEP30:%.*]] = getelementptr i32, ptr [[C]], i32 8 +; CHECK-NEXT: [[SCEVGEP37:%.*]] = getelementptr i32, ptr [[B]], i32 8 +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[V5]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[LSR_IV38:%.*]] = phi ptr [ [[SCEVGEP39:%.*]], %[[VECTOR_BODY]] ], [ [[SCEVGEP37]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV31:%.*]] = phi ptr [ [[SCEVGEP32:%.*]], %[[VECTOR_BODY]] ], [ [[SCEVGEP30]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP25:%.*]], %[[VECTOR_BODY]] ], [ [[SCEVGEP]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[V14:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[V6:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[V15:%.*]], 
%[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]]) +; CHECK-NEXT: [[V7:%.*]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[ACTIVE_LANE_MASK15:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[V7]], i32 [[N]]) +; CHECK-NEXT: [[V8:%.*]] = add i32 [[V7]], 4 +; CHECK-NEXT: [[ACTIVE_LANE_MASK16:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[V8]], i32 [[N]]) +; CHECK-NEXT: [[V9:%.*]] = add i32 [[V8]], 4 +; CHECK-NEXT: [[ACTIVE_LANE_MASK17:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[V9]], i32 [[N]]) +; CHECK-NEXT: [[SCEVGEP42:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV38]], i32 -2 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[SCEVGEP42]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[SCEVGEP43:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV38]], i32 -1 +; CHECK-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull [[SCEVGEP43]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK15]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD19:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull [[LSR_IV38]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK16]], <4 x i32> undef) +; CHECK-NEXT: [[SCEVGEP41:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV38]], i32 1 +; CHECK-NEXT: [[WIDE_MASKED_LOAD20:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull [[SCEVGEP41]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK17]], <4 x i32> undef) +; CHECK-NEXT: [[SCEVGEP34:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV31]], i32 -2 +; CHECK-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[SCEVGEP34]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[SCEVGEP35:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV31]], i32 -1 +; CHECK-NEXT: [[WIDE_MASKED_LOAD22:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull [[SCEVGEP35]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK15]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD23:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull [[LSR_IV31]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK16]], <4 x i32> undef) +; CHECK-NEXT: [[SCEVGEP36:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV31]], i32 1 +; CHECK-NEXT: [[WIDE_MASKED_LOAD24:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull [[SCEVGEP36]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK17]], <4 x i32> undef) +; CHECK-NEXT: [[V10:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD21]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[V11:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD22]], [[WIDE_MASKED_LOAD18]] +; CHECK-NEXT: [[V12:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD23]], [[WIDE_MASKED_LOAD19]] +; CHECK-NEXT: [[V13:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD24]], [[WIDE_MASKED_LOAD20]] +; CHECK-NEXT: [[SCEVGEP27:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV]], i32 -2 +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V10]], ptr [[SCEVGEP27]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[SCEVGEP28:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV]], i32 -1 +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V11]], ptr [[SCEVGEP28]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK15]]) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V12]], ptr [[LSR_IV]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK16]]) +; CHECK-NEXT: [[SCEVGEP29:%.*]] = getelementptr <4 x i32>, ptr [[LSR_IV]], i32 1 +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V13]], 
ptr [[SCEVGEP29]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK17]]) +; CHECK-NEXT: [[SCEVGEP25]] = getelementptr i32, ptr [[LSR_IV]], i32 16 +; CHECK-NEXT: [[SCEVGEP32]] = getelementptr i32, ptr [[LSR_IV31]], i32 16 +; CHECK-NEXT: [[SCEVGEP39]] = getelementptr i32, ptr [[LSR_IV38]], i32 16 +; CHECK-NEXT: [[V14]] = add i32 [[V9]], 4 +; CHECK-NEXT: [[V15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[V6]], i32 1) +; CHECK-NEXT: [[V16:%.*]] = icmp ne i32 [[V15]], 0 +; CHECK-NEXT: br i1 [[V16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp sgt i32 %N, 0 %v0 = add i32 %N, 15 @@ -370,12 +597,42 @@ for.cond.cleanup: ret void } -; CHECK-LABEL: const_expected_in_set_loop -; CHECK: call <4 x i1> @llvm.get.active.lane.mask -; CHECK-NOT: vctp -; CHECK: ret void -; define dso_local void @const_expected_in_set_loop(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: define dso_local void @const_expected_in_set_loop( +; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias readonly captures(none) [[C:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N]], 0 +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], -4 +; CHECK-NEXT: [[TMP4:%.*]] = lshr i32 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i32 [[TMP4]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[VECTOR_PH:.*]], label %[[FOR_COND_CLEANUP:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP5]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[LSR_IV17:%.*]] = phi ptr [ [[SCEVGEP18:%.*]], %[[VECTOR_BODY]] ], [ [[A]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi ptr [ [[SCEVGEP15:%.*]], %[[VECTOR_BODY]] ], [ [[C]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[VECTOR_BODY]] ], [ [[B]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 42) +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV14]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD12]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP7]], ptr [[LSR_IV17]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, ptr [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[SCEVGEP18]] = getelementptr i32, ptr [[LSR_IV17]], i32 4 +; CHECK-NEXT: [[TMP8]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP6]], i32 1) +; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 +; CHECK-NEXT: br i1 
[[TMP9]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp sgt i32 %N, 0 %0 = add i32 %N, 3 @@ -413,12 +670,42 @@ for.cond.cleanup: ; preds = %vector.body, %entry ret void } -; CHECK-LABEL: tripcount_arg_not_invariant -; CHECK: call <4 x i1> @llvm.get.active.lane.mask -; CHECK-NOT: vctp -; CHECK: ret void -; define dso_local void @tripcount_arg_not_invariant(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: define dso_local void @tripcount_arg_not_invariant( +; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias readonly captures(none) [[C:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N]], 0 +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], -4 +; CHECK-NEXT: [[TMP4:%.*]] = lshr i32 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i32 [[TMP4]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[VECTOR_PH:.*]], label %[[FOR_COND_CLEANUP:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP5]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[LSR_IV17:%.*]] = phi ptr [ [[SCEVGEP18:%.*]], %[[VECTOR_BODY]] ], [ [[A]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi ptr [ [[SCEVGEP15:%.*]], %[[VECTOR_BODY]] ], [ [[C]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[VECTOR_BODY]] ], [ [[B]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[INDEX]]) +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV14]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD12]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP7]], ptr [[LSR_IV17]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, ptr [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[SCEVGEP18]] = getelementptr i32, ptr [[LSR_IV17]], i32 4 +; CHECK-NEXT: [[TMP8]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP6]], i32 1) +; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 +; CHECK-NEXT: br i1 [[TMP9]], label %[[VECTOR_BODY]], label %[[VECTOR_PH]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp sgt i32 %N, 0 %0 = add i32 %N, 3 @@ -458,12 +745,42 @@ for.cond.cleanup: ; preds = %vector.body, %entry ret void } -; CHECK-LABEL: addrec_base_not_zero -; CHECK: call <4 x i1> @llvm.get.active.lane.mask -; CHECK-NOT: vctp -; CHECK: ret void -; define dso_local void @addrec_base_not_zero(ptr noalias 
nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: define dso_local void @addrec_base_not_zero( +; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias readonly captures(none) [[C:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N]], 0 +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], -4 +; CHECK-NEXT: [[TMP4:%.*]] = lshr i32 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i32 [[TMP4]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[VECTOR_PH:.*]], label %[[FOR_COND_CLEANUP:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP5]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[LSR_IV17:%.*]] = phi ptr [ [[SCEVGEP18:%.*]], %[[VECTOR_BODY]] ], [ [[A]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi ptr [ [[SCEVGEP15:%.*]], %[[VECTOR_BODY]] ], [ [[C]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[VECTOR_BODY]] ], [ [[B]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 1, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]]) +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD12:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV14]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef) +; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD12]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP7]], ptr [[LSR_IV17]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, ptr [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[SCEVGEP18]] = getelementptr i32, ptr [[LSR_IV17]], i32 4 +; CHECK-NEXT: [[TMP8]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP6]], i32 1) +; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 +; CHECK-NEXT: br i1 [[TMP9]], label %[[VECTOR_BODY]], label %[[VECTOR_PH]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp sgt i32 %N, 0 %0 = add i32 %N, 3 diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll index fa6a66b..9775cf9 100644 --- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll @@ -1,15 +1,55 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --prefix-filecheck-ir-name INST --version 6 ; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve %s -S -o - | FileCheck %s -; CHECK-LABEL: reduction_i32 -; CHECK: phi i32 [ 0, %vector.ph ] -; CHECK: phi <8 x i16> [ zeroinitializer, %vector.ph ] -; CHECK: phi i32 -; CHECK: [[PHI:%[^ ]+]] 
= phi i32 [ %N, %vector.ph ], [ [[ELEMS:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[PHI]]) -; CHECK: [[ELEMS]] = sub i32 [[PHI]], 8 -; CHECK: call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp2, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef) -; CHECK: call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp5, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef) define i16 @reduction_i32(ptr nocapture readonly %A, ptr nocapture readonly %B, i32 %N) { +; CHECK-LABEL: define i16 @reduction_i32( +; CHECK-SAME: ptr readonly captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP:%.*]] = add i32 [[N]], -1 +; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8 +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N_VEC]], -8 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 3 +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 1 +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP2]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i16> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INSTTMP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[TMP4]]) +; CHECK-NEXT: [[TMP6]] = sub i32 [[TMP4]], 8 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[INSTTMP2]], i32 4, <8 x i1> [[TMP5]], <8 x i16> undef) +; CHECK-NEXT: [[INSTTMP5:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[INSTTMP5]], i32 4, <8 x i1> [[TMP5]], <8 x i16> undef) +; CHECK-NEXT: [[TMP7:%.*]] = add <8 x i16> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] +; CHECK-NEXT: [[TMP8]] = add <8 x i16> [[TMP7]], [[WIDE_MASKED_LOAD3]] +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8 +; CHECK-NEXT: [[TMP9]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP3]], i32 1) +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP9]], 0 +; CHECK-NEXT: br i1 [[TMP12]], label %[[VECTOR_BODY]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[VEC_PHI_LCSSA:%.*]] = phi <8 x i16> [ [[VEC_PHI]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[DOTLCSSA3:%.*]] = phi <8 x i1> [ [[TMP5]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi <8 x i16> [ [[TMP8]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = select <8 x i1> [[DOTLCSSA3]], <8 x i16> [[DOTLCSSA]], <8 x i16> [[VEC_PHI_LCSSA]] +; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i16> [[TMP10]], <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i16> [[RDX_SHUF]], [[TMP10]] +; CHECK-NEXT: [[RDX_SHUF4:%.*]] = shufflevector <8 x i16> [[BIN_RDX]], <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 
poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX5:%.*]] = add <8 x i16> [[RDX_SHUF4]], [[BIN_RDX]] +; CHECK-NEXT: [[RDX_SHUF6:%.*]] = shufflevector <8 x i16> [[BIN_RDX5]], <8 x i16> undef, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <8 x i16> [[RDX_SHUF6]], [[BIN_RDX5]] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement <8 x i16> [[BIN_RDX7]], i32 0 +; CHECK-NEXT: ret i16 [[TMP11]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: [[RES_0:%.*]] = phi i16 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: ret i16 [[RES_0]] +; entry: %cmp8 = icmp eq i32 %N, 0 br i1 %cmp8, label %for.cond.cleanup, label %vector.ph @@ -59,16 +99,52 @@ for.cond.cleanup: ret i16 %res.0 } -; CHECK-LABEL: reduction_i32_with_scalar -; CHECK: vector.body: -; CHECK: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] -; CHECK: %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %{{.*}}, %vector.body ] -; CHECK: %{{.*}} = phi i32 [ %{{.*}}, %vector.ph ], [ %{{.*}}, %vector.body ] -; CHECK: [[PHI:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[ELEMS:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[PHI]]) -; CHECK: [[ELEMS]] = sub i32 [[PHI]], 8 -; CHECK: call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp2, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef) define i16 @reduction_i32_with_scalar(ptr nocapture readonly %A, i16 %B, i32 %N) local_unnamed_addr { +; CHECK-LABEL: define i16 @reduction_i32_with_scalar( +; CHECK-SAME: ptr readonly captures(none) [[A:%.*]], i16 [[B:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP:%.*]] = add i32 [[N]], -1 +; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw nsw i32 [[TMP]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i16> undef, i16 [[B]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT3]], <8 x i16> undef, <8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N_VEC]], -8 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 3 +; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 1 +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP2]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i16> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[INSTTMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INSTTMP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[TMP4]]) +; CHECK-NEXT: [[TMP6]] = sub i32 [[TMP4]], 8 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[INSTTMP2]], i32 4, <8 x i1> [[TMP5]], <8 x i16> undef) +; CHECK-NEXT: [[INSTTMP5:%.*]] = add <8 x i16> [[VEC_PHI]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[INSTTMP6]] = add <8 x i16> [[INSTTMP5]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 
8 +; CHECK-NEXT: [[TMP7]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP3]], i32 1) +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label %[[VECTOR_BODY]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[INSTTMP8:%.*]] = select <8 x i1> [[TMP5]], <8 x i16> [[INSTTMP6]], <8 x i16> [[VEC_PHI]] +; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i16> [[INSTTMP8]], <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i16> [[RDX_SHUF]], [[INSTTMP8]] +; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <8 x i16> [[BIN_RDX]], <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX6:%.*]] = add <8 x i16> [[RDX_SHUF5]], [[BIN_RDX]] +; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <8 x i16> [[BIN_RDX6]], <8 x i16> undef, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <8 x i16> [[RDX_SHUF7]], [[BIN_RDX6]] +; CHECK-NEXT: [[TMP9:%.*]] = extractelement <8 x i16> [[BIN_RDX8]], i32 0 +; CHECK-NEXT: ret i16 [[TMP9]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: [[RES_0:%.*]] = phi i16 [ 0, %[[ENTRY]] ] +; CHECK-NEXT: ret i16 [[RES_0]] +; entry: %cmp8 = icmp eq i32 %N, 0 br i1 %cmp8, label %for.cond.cleanup, label %vector.ph @@ -119,15 +195,46 @@ for.cond.cleanup: ; despite this we can still calculate a precise enough range so that ; the overflow checks for get.active.lane.mask don't reject ; tail-predication. -; -; CHECK-LABEL: @reduction_not_guarded -; -; CHECK: vector.body: -; CHECK: @llvm.arm.mve.vctp -; CHECK-NOT: @llvm.get.active.lane.mask.v8i1.i32 -; CHECK: ret -; define i16 @reduction_not_guarded(ptr nocapture readonly %A, i16 %B, i32 %N) local_unnamed_addr { +; CHECK-LABEL: define i16 @reduction_not_guarded( +; CHECK-SAME: ptr readonly captures(none) [[A:%.*]], i16 [[B:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP:%.*]] = add i32 [[N]], -1 +; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw nsw i32 [[TMP]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i16> undef, i16 [[B]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT3]], <8 x i16> undef, <8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N_VEC]], -8 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 3 +; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 1 +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP2]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i16> [ zeroinitializer, %[[ENTRY]] ], [ [[INSTTMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[START]], %[[ENTRY]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[N]], %[[ENTRY]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INSTTMP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[TMP4]]) +; CHECK-NEXT: [[TMP6]] = sub i32 [[TMP4]], 8 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr
[[INSTTMP2]], i32 4, <8 x i1> [[TMP5]], <8 x i16> undef) +; CHECK-NEXT: [[INSTTMP5:%.*]] = add <8 x i16> [[VEC_PHI]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[INSTTMP6]] = add <8 x i16> [[INSTTMP5]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 8 +; CHECK-NEXT: [[TMP7]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP3]], i32 1) +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label %[[VECTOR_BODY]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[INSTTMP8:%.*]] = select <8 x i1> [[TMP5]], <8 x i16> [[INSTTMP6]], <8 x i16> [[VEC_PHI]] +; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i16> [[INSTTMP8]], <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i16> [[RDX_SHUF]], [[INSTTMP8]] +; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <8 x i16> [[BIN_RDX]], <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX6:%.*]] = add <8 x i16> [[RDX_SHUF5]], [[BIN_RDX]] +; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <8 x i16> [[BIN_RDX6]], <8 x i16> undef, <8 x i32> <i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <8 x i16> [[RDX_SHUF7]], [[BIN_RDX6]] +; CHECK-NEXT: [[TMP9:%.*]] = extractelement <8 x i16> [[BIN_RDX8]], i32 0 +; CHECK-NEXT: ret i16 [[TMP9]] +; entry: %tmp = add i32 %N, -1 %n.rnd.up = add nuw nsw i32 %tmp, 8 @@ -166,12 +273,76 @@ middle.block: ; preds = %vector.body ret i16 %tmp9 } -; CHECK-LABEL: @Correlation -; CHECK: vector.body: -; CHECK: @llvm.arm.mve.vctp -; CHECK-NOT: %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask -; define dso_local void @Correlation(ptr nocapture readonly %Input, ptr nocapture %Output, i16 signext %Size, i16 signext %N, i16 signext %Scale) local_unnamed_addr #0 { +; CHECK-LABEL: define dso_local void @Correlation( +; CHECK-SAME: ptr readonly captures(none) [[INPUT:%.*]], ptr captures(none) [[OUTPUT:%.*]], i16 signext [[SIZE:%.*]], i16 signext [[N:%.*]], i16 signext [[SCALE:%.*]]) local_unnamed_addr #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[N]] to i32 +; CHECK-NEXT: [[CMP36:%.*]] = icmp sgt i16 [[N]], 0 +; CHECK-NEXT: br i1 [[CMP36]], label %[[FOR_BODY_LR_PH:.*]], label %[[FOR_END17:.*]] +; CHECK: [[FOR_BODY_LR_PH]]: +; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[SIZE]] to i32 +; CHECK-NEXT: [[CONV1032:%.*]] = zext i16 [[SCALE]] to i32 +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[CONV2]], 3 +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[LSR_IV51:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], %[[FOR_END:.*]] ], [ [[TMP0]], %[[FOR_BODY_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV46:%.*]] = phi ptr [ [[SCEVGEP47:%.*]], %[[FOR_END]] ], [ [[INPUT]], %[[FOR_BODY_LR_PH]] ] +; CHECK-NEXT: [[I_037:%.*]] = phi i32 [ 0, %[[FOR_BODY_LR_PH]] ], [ [[INC16:%.*]], %[[FOR_END]] ] +; CHECK-NEXT: [[TMP1:%.*]] = mul nsw i32 [[I_037]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[CONV2]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = mul nsw i32 [[I_037]], -1 +; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP0]], [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = lshr i32 [[TMP4]], 2 +; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i32 [[TMP5]], 2 +; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], -4 +; CHECK-NEXT: [[TMP8:%.*]] = lshr i32 [[TMP7]], 2 +; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw i32 [[TMP8]], 1 +; 
CHECK-NEXT: [[CMP433:%.*]] = icmp slt i32 [[I_037]], [[CONV2]] +; CHECK-NEXT: br i1 [[CMP433]], label %[[VECTOR_PH:.*]], label %[[FOR_END]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP9]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[LSR_IV48:%.*]] = phi ptr [ [[SCEVGEP49:%.*]], %[[VECTOR_BODY]] ], [ [[LSR_IV46]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[VECTOR_BODY]] ], [ [[INPUT]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP20:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP21:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP11:%.*]] = phi i32 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[TMP13:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP12:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP11]]) +; CHECK-NEXT: [[TMP13]] = sub i32 [[TMP11]], 4 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[LSR_IV]], i32 2, <4 x i1> [[TMP12]], <4 x i16> undef) +; CHECK-NEXT: [[TMP14:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD]] to <4 x i32> +; CHECK-NEXT: [[WIDE_MASKED_LOAD42:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[LSR_IV48]], i32 2, <4 x i1> [[TMP12]], <4 x i16> undef) +; CHECK-NEXT: [[TMP15:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD42]] to <4 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = mul nsw <4 x i32> [[TMP15]], [[TMP14]] +; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> undef, i32 [[CONV1032]], i32 0 +; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <4 x i32> [[TMP17]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = ashr <4 x i32> [[TMP16]], [[TMP18]] +; CHECK-NEXT: [[TMP20]] = add <4 x i32> [[TMP19]], [[VEC_PHI]] +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i16, ptr [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP49]] = getelementptr i16, ptr [[LSR_IV48]], i32 4 +; CHECK-NEXT: [[TMP21]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP10]], i32 1) +; CHECK-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 +; CHECK-NEXT: br i1 [[TMP22]], label %[[VECTOR_BODY]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP23:%.*]] = select <4 x i1> [[TMP12]], <4 x i32> [[TMP20]], <4 x i32> [[VEC_PHI]] +; CHECK-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP23]]) +; CHECK-NEXT: br label %[[FOR_END]] +; CHECK: [[FOR_END]]: +; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[FOR_BODY]] ], [ [[TMP24]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[TMP25:%.*]] = lshr i32 [[SUM_0_LCSSA]], 16 +; CHECK-NEXT: [[CONV13:%.*]] = trunc i32 [[TMP25]] to i16 +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i16, ptr [[OUTPUT]], i32 [[I_037]] +; CHECK-NEXT: store i16 [[CONV13]], ptr [[ARRAYIDX14]], align 2 +; CHECK-NEXT: [[INC16]] = add nuw nsw i32 [[I_037]], 1 +; CHECK-NEXT: [[SCEVGEP47]] = getelementptr i16, ptr [[LSR_IV46]], i32 1 +; CHECK-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV51]], -1 +; CHECK-NEXT: [[EXITCOND39:%.*]] = icmp eq i32 [[INC16]], [[CONV]] +; CHECK-NEXT: br i1 [[EXITCOND39]], label %[[FOR_END17]], label %[[FOR_BODY]] +; CHECK: [[FOR_END17]]: +; CHECK-NEXT: ret void +; entry: %conv = sext i16 %N to i32 %cmp36 = icmp sgt i16 %N, 0 diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll index a8ad360..b54d526 100644 --- 
a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll @@ -1,8 +1,43 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve,+lob %s -S -o - | FileCheck %s -; CHECK-LABEL: expand_v8i16_v8i32 -; CHECK-NOT: call i32 @llvm.arm.mve.vctp define void @expand_v8i16_v8i32(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define void @expand_v8i16_v8i32( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 7 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 3 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 3 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -8 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 3 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 8 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP]], i32 4, <8 x i1> [[TMP1]], <8 x i16> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP1]], <8 x i16> undef) +; CHECK-NEXT: [[EXPAND_1:%.*]] = zext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32> +; CHECK-NEXT: [[EXPAND_2:%.*]] = zext <8 x i16> [[WIDE_MASKED_LOAD2]] to <8 x i32> +; CHECK-NEXT: [[MUL:%.*]] = mul nsw <8 x i32> [[EXPAND_2]], [[EXPAND_1]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v8i32.p0(<8 x i32> [[MUL]], ptr [[TMP6]], i32 4, <8 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 7 @@ -39,15 +74,57 @@ for.cond.cleanup: ; preds = %vector.body, %entry ret void } -; CHECK-LABEL: expand_v8i16_v4i32 -; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[ELEMS_REM:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[ELEMS]]) -; CHECK: [[ELEMS_REM]] = sub i32 [[ELEMS]], 8 -; 
CHECK: tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef) -; CHECK: %store.pred = icmp ule <4 x i32> %induction.store -; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %store.pred) -; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %store.pred) define void @expand_v8i16_v4i32(ptr readonly %a, ptr readonly %b, ptr %c, ptr %d, i32 %N) { +; CHECK-LABEL: define void @expand_v8i16_v4i32( +; CHECK-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 7 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 3 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 3 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -8 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 3 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = add i32 [[N]], -1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT10_STORE:%.*]] = insertelement <4 x i32> undef, i32 [[TRIP_COUNT_MINUS_1]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT11_STORE:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT10_STORE]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STORE_IDX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[STORE_IDX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 8 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP]], i32 4, <8 x i1> [[TMP1]], <8 x i16> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP1]], <8 x i16> undef) +; CHECK-NEXT: [[EXTRACT_2_LOW:%.*]] = shufflevector <8 x i16> [[WIDE_MASKED_LOAD2]], <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[EXTRACT_2_HIGH:%.*]] = shufflevector <8 x i16> [[WIDE_MASKED_LOAD2]], <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> +; CHECK-NEXT: [[EXPAND_1:%.*]] = zext <4 x i16> [[EXTRACT_2_LOW]] to <4 x i32> +; CHECK-NEXT: [[EXPAND_2:%.*]] = zext <4 x i16> [[EXTRACT_2_HIGH]] to <4 x i32> +; CHECK-NEXT: [[MUL:%.*]] = mul nsw <4 x i32> [[EXPAND_2]], [[EXPAND_1]] +; CHECK-NEXT: [[SUB:%.*]] = mul nsw <4 x i32> [[EXPAND_1]], [[EXPAND_2]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT_STORE:%.*]] = insertelement <4 x i32> undef, i32 [[STORE_IDX]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT_STORE:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT_STORE]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[INDUCTION_STORE:%.*]] = add 
<4 x i32> [[BROADCAST_SPLAT_STORE]], <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[STORE_PRED:%.*]] = icmp ule <4 x i32> [[INDUCTION_STORE]], [[BROADCAST_SPLAT11_STORE]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i32 [[STORE_IDX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> [[MUL]], ptr [[TMP6]], i32 4, <4 x i1> [[STORE_PRED]]) +; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[D]], i32 [[STORE_IDX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> [[SUB]], ptr [[GEP]], i32 4, <4 x i1> [[STORE_PRED]]) +; CHECK-NEXT: [[STORE_IDX_NEXT]] = add i32 [[STORE_IDX]], 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 7 @@ -98,9 +175,43 @@ for.cond.cleanup: ; preds = %vector.body, %entry ret void } -; CHECK-LABEL: expand_v4i32_v4i64 -; CHECK-NOT: call i32 @llvm.arm.mve.vctp define void @expand_v4i32_v4i64(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) { +; CHECK-LABEL: define void @expand_v4i32_v4i64( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], ptr noalias captures(none) [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP8]], 2 +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw i32 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = lshr i32 [[TMP11]], 2 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i32 [[TMP12]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP13]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]]) +; CHECK-NEXT: [[TMP2]] = sub i32 [[TMP0]], 4 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD2:%.*]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP3]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[EXPAND_1:%.*]] = zext <4 x i32> [[WIDE_MASKED_LOAD]] to <4 x i64> +; CHECK-NEXT: [[EXPAND_2:%.*]] = zext <4 x i32> [[WIDE_MASKED_LOAD2]] to <4 x i64> +; CHECK-NEXT: [[MUL:%.*]] = mul nsw <4 x i64> [[EXPAND_2]], [[EXPAND_1]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[C]], i32 [[INDEX]] +; CHECK-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[MUL]], ptr [[TMP6]], i32 4, <4 x i1> 
[[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP14]], i32 1) +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +; CHECK-NEXT: br i1 [[TMP16]], label %[[VECTOR_BODY]], label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: ret void +; entry: %cmp8 = icmp eq i32 %N, 0 %tmp8 = add i32 %N, 3 diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll index ec542df..fb1a4a4 100644 --- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll @@ -1,24 +1,47 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve %s -S -o - | FileCheck %s -; CHECK-LABEL: vec_mul_reduce_add - -; CHECK: vector.ph: -; CHECK: %start = call i32 @llvm.start.loop.iterations.i32 -; CHECK: br label %vector.body - -; CHECK: vector.body: -; CHECK: [[ELTS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[SUB:%[^ ]+]], %vector.body ] -; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELTS]]) -; CHECK: [[SUB]] = sub i32 [[ELTS]], 4 -; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]] -; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], - -; CHECK: middle.block: -; CHECK: [[VPSEL:%[^ ]+]] = select <4 x i1> [[VCTP]], -; CHECK: call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VPSEL]]) - define i32 @vec_mul_reduce_add(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, i32 %N) { +; CHECK-LABEL: define i32 @vec_mul_reduce_add( +; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[N]], 0 +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], 3 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], -4 +; CHECK-NEXT: [[TMP4:%.*]] = lshr i32 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i32 [[TMP4]], 1 +; CHECK-NEXT: br i1 [[CMP8]], label %[[FOR_COND_CLEANUP:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP5]]) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[LSR_IV2:%.*]] = phi ptr [ [[SCEVGEP3:%.*]], %[[VECTOR_BODY]] ], [ [[A]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[VECTOR_BODY]] ], [ [[B]], %[[VECTOR_PH]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = phi i32 [ [[START]], %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = phi i32 [ [[N]], %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP8:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP7]]) +; CHECK-NEXT: [[TMP9]] = sub i32 [[TMP7]], 4 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV2]], i32 4, <4 x i1> [[TMP8]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD13:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV]], 
i32 4, <4 x i1> [[TMP8]], <4 x i32> undef) +; CHECK-NEXT: [[TMP10:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD13]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: [[TMP11]] = add nsw <4 x i32> [[TMP10]], [[VEC_PHI]] +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP3]] = getelementptr i32, ptr [[LSR_IV2]], i32 4 +; CHECK-NEXT: [[TMP12]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP6]], i32 1) +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 +; CHECK-NEXT: br i1 [[TMP13]], label %[[VECTOR_BODY]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP14:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP11]], <4 x i32> [[VEC_PHI]] +; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP14]]) +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[TMP15]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RES_0_LCSSA]] +; entry: %cmp8 = icmp eq i32 %N, 0 %0 = add i32 %N, 3 diff --git a/llvm/test/CodeGen/Thumb2/carry.ll b/llvm/test/CodeGen/Thumb2/carry.ll index 1e2b332..47c7918 100644 --- a/llvm/test/CodeGen/Thumb2/carry.ll +++ b/llvm/test/CodeGen/Thumb2/carry.ll @@ -1,35 +1,52 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s define i64 @f1(i64 %a, i64 %b) { -entry: ; CHECK-LABEL: f1: -; CHECK: subs r0, r0, r2 -; CHECK: sbcs r1, r3 - %tmp = sub i64 %a, %b - ret i64 %tmp +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: subs r0, r0, r2 +; CHECK-NEXT: sbcs r1, r3 +; CHECK-NEXT: bx lr +entry: + %tmp = sub i64 %a, %b + ret i64 %tmp } define i64 @f2(i64 %a, i64 %b) { -entry: ; CHECK-LABEL: f2: -; CHECK: lsls r1, r1, #1 -; CHECK: orr.w r1, r1, r0, lsr #31 -; CHECK: rsbs r0, r2, r0, lsl #1 -; CHECK: sbcs r1, r3 - %tmp1 = shl i64 %a, 1 - %tmp2 = sub i64 %tmp1, %b - ret i64 %tmp2 +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: lsls r1, r1, #1 +; CHECK-NEXT: orr.w r1, r1, r0, lsr #31 +; CHECK-NEXT: rsbs r0, r2, r0, lsl #1 +; CHECK-NEXT: sbcs r1, r3 +; CHECK-NEXT: bx lr +entry: + %tmp1 = shl i64 %a, 1 + %tmp2 = sub i64 %tmp1, %b + ret i64 %tmp2 } ; rdar://12559385 define i64 @f3(i32 %vi) { -entry: ; CHECK-LABEL: f3: -; CHECK: movw [[REG:r[0-9]+]], #36102 -; CHECK: sbcs r{{[0-9]+}}, [[REG]] - %v0 = zext i32 %vi to i64 - %v1 = xor i64 %v0, -155057456198619 - %v4 = add i64 %v1, 155057456198619 - %v5 = add i64 %v4, %v1 - ret i64 %v5 +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: movw r1, #19493 +; CHECK-NEXT: movt r1, #57191 +; CHECK-NEXT: eors r0, r1 +; CHECK-NEXT: movw r2, #29433 +; CHECK-NEXT: movw r3, #46043 +; CHECK-NEXT: movw r1, #36102 +; CHECK-NEXT: movt r2, #65535 +; CHECK-NEXT: adds r0, r0, r0 +; CHECK-NEXT: movt r3, #8344 +; CHECK-NEXT: sbcs r2, r1 +; CHECK-NEXT: adds r0, r0, r3 +; CHECK-NEXT: adcs r1, r2 +; CHECK-NEXT: bx lr +entry: + %v0 = zext i32 %vi to i64 + %v1 = xor i64 %v0, -155057456198619 + %v4 = add i64 %v1, 155057456198619 + %v5 = add i64 %v4, %v1 + ret i64 %v5 } diff --git a/llvm/test/CodeGen/WebAssembly/bulk-memory.ll b/llvm/test/CodeGen/WebAssembly/bulk-memory.ll index ae170d7..d949068 100644 --- a/llvm/test/CodeGen/WebAssembly/bulk-memory.ll +++ b/llvm/test/CodeGen/WebAssembly/bulk-memory.ll @@ -104,6 +104,31 @@ define void @memset_i32(ptr %dest, i8 %val, i32 %len) { ret void } +; CHECK-LABEL: memcpy_0: +; CHECK-NEXT: .functype memcpy_0 (i32, i32) -> () +; CHECK-NEXT: return +define 
void @memcpy_0(ptr %dest, ptr %src) { + call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 0, i1 0) + ret void +} + +; CHECK-LABEL: memmove_0: +; CHECK-NEXT: .functype memmove_0 (i32, i32) -> () +; CHECK-NEXT: return +define void @memmove_0(ptr %dest, ptr %src) { + call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 0, i1 0) + ret void +} + +; CHECK-LABEL: memset_0: +; NO-BULK-MEM-NOT: memory.fill +; BULK-MEM-NEXT: .functype memset_0 (i32, i32) -> () +; BULK-MEM-NEXT: return +define void @memset_0(ptr %dest, i8 %val) { + call void @llvm.memset.p0.i32(ptr %dest, i8 %val, i32 0, i1 0) + ret void +} + ; CHECK-LABEL: memcpy_1: ; CHECK-NEXT: .functype memcpy_1 (i32, i32) -> () ; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1) @@ -137,14 +162,8 @@ define void @memset_1(ptr %dest, i8 %val) { ; CHECK-LABEL: memcpy_1024: ; NO-BULK-MEM-NOT: memory.copy ; BULK-MEM-NEXT: .functype memcpy_1024 (i32, i32) -> () -; BULK-MEM-NEXT: block ; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: i32.eqz $push[[L1:[0-9]+]]=, $pop[[L0]] -; BULK-MEM-NEXT: br_if 0, $pop[[L1]] -; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L2]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]] ; BULK-MEM-NEXT: return define void @memcpy_1024(ptr %dest, ptr %src) { call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 1024, i1 0) @@ -154,14 +173,8 @@ define void @memcpy_1024(ptr %dest, ptr %src) { ; CHECK-LABEL: memmove_1024: ; NO-BULK-MEM-NOT: memory.copy ; BULK-MEM-NEXT: .functype memmove_1024 (i32, i32) -> () -; BULK-MEM-NEXT: block ; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: i32.eqz $push[[L1:[0-9]+]]=, $pop[[L0]] -; BULK-MEM-NEXT: br_if 0, $pop[[L1]] -; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L2]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]] ; BULK-MEM-NEXT: return define void @memmove_1024(ptr %dest, ptr %src) { call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 1024, i1 0) @@ -171,14 +184,8 @@ define void @memmove_1024(ptr %dest, ptr %src) { ; CHECK-LABEL: memset_1024: ; NO-BULK-MEM-NOT: memory.fill ; BULK-MEM-NEXT: .functype memset_1024 (i32, i32) -> () -; BULK-MEM-NEXT: block ; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: i32.eqz $push[[L1:[0-9]+]]=, $pop[[L0]] -; BULK-MEM-NEXT: br_if 0, $pop[[L1]] -; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L2]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L0]] ; BULK-MEM-NEXT: return define void @memset_1024(ptr %dest, i8 %val) { call void @llvm.memset.p0.i32(ptr %dest, i8 %val, i32 1024, i1 0) @@ -201,17 +208,11 @@ define void @memset_1024(ptr %dest, i8 %val) { ; BULK-MEM-NEXT: .functype memcpy_alloca_src (i32) -> () ; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer ; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112 -; BULK-MEM-NEXT: i32.sub $[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]] -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 100 -; BULK-MEM-NEXT: i32.eqz $push[[L4:[0-9]+]]=, $pop[[L3]] -; BULK-MEM-NEXT: br_if 0, $pop[[L4]] -; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 12 -; BULK-MEM-NEXT: i32.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]] -; BULK-MEM-NEXT: i32.const $push[[L7:[0-9]+]]=, 100 
-; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L6]], $pop[[L7]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]] +; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12 +; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]] +; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100 +; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]] ; BULK-MEM-NEXT: return define void @memcpy_alloca_src(ptr %dst) { %a = alloca [100 x i8] @@ -224,17 +225,11 @@ define void @memcpy_alloca_src(ptr %dst) { ; BULK-MEM-NEXT: .functype memcpy_alloca_dst (i32) -> () ; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer ; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112 -; BULK-MEM-NEXT: i32.sub $[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]] -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 100 -; BULK-MEM-NEXT: i32.eqz $push[[L4:[0-9]+]]=, $pop[[L3]] -; BULK-MEM-NEXT: br_if 0, $pop[[L4]] -; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 12 -; BULK-MEM-NEXT: i32.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]] -; BULK-MEM-NEXT: i32.const $push[[L7:[0-9]+]]=, 100 -; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L6]], $0, $pop[[L7]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]] +; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12 +; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]] +; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100 +; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]] ; BULK-MEM-NEXT: return define void @memcpy_alloca_dst(ptr %src) { %a = alloca [100 x i8] @@ -247,17 +242,11 @@ define void @memcpy_alloca_dst(ptr %src) { ; BULK-MEM-NEXT: .functype memset_alloca (i32) -> () ; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer ; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112 -; BULK-MEM-NEXT: i32.sub $1=, $pop[[L0]], $pop[[L1]] -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i32.const $push[[L2:[0-9]+]]=, 100 -; BULK-MEM-NEXT: i32.eqz $push[[L3:[0-9]+]]=, $pop[[L2]] -; BULK-MEM-NEXT: br_if 0, $pop[[L3]] -; BULK-MEM-NEXT: i32.const $push[[L4:[0-9]+]]=, 12 -; BULK-MEM-NEXT: i32.add $push[[L5:[0-9]+]]=, $1, $pop[[L4]] -; BULK-MEM-NEXT: i32.const $push[[L6:[0-9]+]]=, 100 -; BULK-MEM-NEXT: memory.fill 0, $pop[[L5]], $0, $pop[[L6]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]] +; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12 +; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]] +; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100 +; BULK-MEM-NEXT: memory.fill 0, $pop[[L4]], $0, $pop[[L5]] ; BULK-MEM-NEXT: return define void @memset_alloca(i8 %val) { %a = alloca [100 x i8] diff --git a/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll b/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll index 0cf8493..d0206a3 100644 --- a/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll +++ b/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll @@ -110,6 +110,31 @@ define void @memset_i32(ptr %dest, i8 %val, i64 %len) { ret void } +; CHECK-LABEL: memcpy_0: +; CHECK-NEXT: .functype memcpy_0 (i64, i64) -> () +; CHECK-NEXT: return +define void @memcpy_0(ptr %dest, ptr %src) { + call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 0, i1 0) + ret void +} + +; CHECK-LABEL: memmove_0: +; CHECK-NEXT: .functype memmove_0 (i64, i64) -> () +; CHECK-NEXT: return +define void @memmove_0(ptr %dest, ptr %src) { + call void 
@llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 0, i1 0) + ret void +} + +; CHECK-LABEL: memset_0: +; NO-BULK-MEM-NOT: memory.fill +; BULK-MEM-NEXT: .functype memset_0 (i64, i32) -> () +; BULK-MEM-NEXT: return +define void @memset_0(ptr %dest, i8 %val) { + call void @llvm.memset.p0.i64(ptr %dest, i8 %val, i64 0, i1 0) + ret void +} + ; CHECK-LABEL: memcpy_1: ; CHECK-NEXT: .functype memcpy_1 (i64, i64) -> () ; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1) @@ -143,14 +168,8 @@ define void @memset_1(ptr %dest, i8 %val) { ; CHECK-LABEL: memcpy_1024: ; NO-BULK-MEM-NOT: memory.copy ; BULK-MEM-NEXT: .functype memcpy_1024 (i64, i64) -> () -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: i64.eqz $push0=, $pop[[L1]] -; BULK-MEM-NEXT: br_if 0, $pop0 ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block ; BULK-MEM-NEXT: return define void @memcpy_1024(ptr %dest, ptr %src) { call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 1024, i1 0) @@ -160,14 +179,8 @@ define void @memcpy_1024(ptr %dest, ptr %src) { ; CHECK-LABEL: memmove_1024: ; NO-BULK-MEM-NOT: memory.copy ; BULK-MEM-NEXT: .functype memmove_1024 (i64, i64) -> () -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: i64.eqz $push0=, $pop[[L1]] -; BULK-MEM-NEXT: br_if 0, $pop0 ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block ; BULK-MEM-NEXT: return define void @memmove_1024(ptr %dest, ptr %src) { call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 1024, i1 0) @@ -177,14 +190,8 @@ define void @memmove_1024(ptr %dest, ptr %src) { ; CHECK-LABEL: memset_1024: ; NO-BULK-MEM-NOT: memory.fill ; BULK-MEM-NEXT: .functype memset_1024 (i64, i32) -> () -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 1024 -; BULK-MEM-NEXT: i64.eqz $push0=, $pop[[L1]] -; BULK-MEM-NEXT: br_if 0, $pop0 ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024 ; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L0]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block ; BULK-MEM-NEXT: return define void @memset_1024(ptr %dest, i8 %val) { call void @llvm.memset.p0.i64(ptr %dest, i8 %val, i64 1024, i1 0) @@ -207,17 +214,11 @@ define void @memset_1024(ptr %dest, i8 %val) { ; BULK-MEM-NEXT: .functype memcpy_alloca_src (i64) -> () ; BULK-MEM-NEXT: global.get $push[[L1:[0-9]+]]=, __stack_pointer ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 112 -; BULK-MEM-NEXT: i64.sub $[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]] -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 100 -; BULK-MEM-NEXT: i64.eqz $push[[L4:[0-9]+]]=, $pop[[L3]] -; BULK-MEM-NEXT: br_if 0, $pop[[L4]] -; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 12 -; BULK-MEM-NEXT: i64.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]] -; BULK-MEM-NEXT: i64.const $push[[L7:[0-9]+]]=, 100 -; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L6]], $pop[[L7]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]] +; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12 +; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]] +; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100 +; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]] ; BULK-MEM-NEXT: return define void @memcpy_alloca_src(ptr %dst) { %a 
= alloca [100 x i8] @@ -230,17 +231,11 @@ define void @memcpy_alloca_src(ptr %dst) { ; BULK-MEM-NEXT: .functype memcpy_alloca_dst (i64) -> () ; BULK-MEM-NEXT: global.get $push[[L1:[0-9]+]]=, __stack_pointer ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 112 -; BULK-MEM-NEXT: i64.sub $[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]] -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 100 -; BULK-MEM-NEXT: i64.eqz $push[[L4:[0-9]+]]=, $pop[[L3]] -; BULK-MEM-NEXT: br_if 0, $pop[[L4]] -; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 12 -; BULK-MEM-NEXT: i64.add $push[[L6:[0-9]+]]=, $[[L2]], $pop[[L5]] -; BULK-MEM-NEXT: i64.const $push[[L7:[0-9]+]]=, 100 -; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L6]], $0, $pop[[L7]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]] +; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12 +; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]] +; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100 +; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]] ; BULK-MEM-NEXT: return define void @memcpy_alloca_dst(ptr %src) { %a = alloca [100 x i8] @@ -253,17 +248,11 @@ define void @memcpy_alloca_dst(ptr %src) { ; BULK-MEM-NEXT: .functype memset_alloca (i32) -> () ; BULK-MEM-NEXT: global.get $push[[L1:[0-9]+]]=, __stack_pointer ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 112 -; BULK-MEM-NEXT: i64.sub $1=, $pop[[L1]], $pop[[L0]] -; BULK-MEM-NEXT: block -; BULK-MEM-NEXT: i64.const $push[[L2:[0-9]+]]=, 100 -; BULK-MEM-NEXT: i64.eqz $push[[L3:[0-9]+]]=, $pop[[L2]] -; BULK-MEM-NEXT: br_if 0, $pop[[L3]] -; BULK-MEM-NEXT: i64.const $push[[L4:[0-9]+]]=, 12 -; BULK-MEM-NEXT: i64.add $push[[L5:[0-9]+]]=, $1, $pop[[L4]] -; BULK-MEM-NEXT: i64.const $push[[L6:[0-9]+]]=, 100 -; BULK-MEM-NEXT: memory.fill 0, $pop[[L5]], $0, $pop[[L6]] -; BULK-MEM-NEXT: .LBB{{.*}}: -; BULK-MEM-NEXT: end_block +; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L1]], $pop[[L0]] +; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12 +; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]] +; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100 +; BULK-MEM-NEXT: memory.fill 0, $pop[[L4]], $0, $pop[[L5]] ; BULK-MEM-NEXT: return define void @memset_alloca(i8 %val) { %a = alloca [100 x i8] diff --git a/llvm/test/CodeGen/WebAssembly/int-mac-reduction-loops.ll b/llvm/test/CodeGen/WebAssembly/int-mac-reduction-loops.ll index 04a2268..314e1b4 100644 --- a/llvm/test/CodeGen/WebAssembly/int-mac-reduction-loops.ll +++ b/llvm/test/CodeGen/WebAssembly/int-mac-reduction-loops.ll @@ -1,5 +1,6 @@ ; RUN: opt -mattr=+simd128 -passes=loop-vectorize %s | llc -mtriple=wasm32 -mattr=+simd128 -verify-machineinstrs -o - | FileCheck %s ; RUN: opt -mattr=+simd128 -passes=loop-vectorize -vectorizer-maximize-bandwidth %s | llc -mtriple=wasm32 -mattr=+simd128 -verify-machineinstrs -o - | FileCheck %s --check-prefix=MAX-BANDWIDTH +; RUN: opt -mattr=+simd128,+relaxed-simd -passes=loop-vectorize -vectorizer-maximize-bandwidth %s | llc -mtriple=wasm32 -mattr=+simd128,+relaxed-simd -verify-machineinstrs -o - | FileCheck %s --check-prefix=RELAXED-MAX-BANDWIDTH target triple = "wasm32" @@ -23,6 +24,10 @@ define hidden i32 @i32_mac_s8(ptr nocapture noundef readonly %a, ptr nocapture n ; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i32x4.relaxed_dot_i8x16_i7x16_add_s + entry: %cmp7.not = icmp eq i32 %N, 0 br i1 %cmp7.not, 
label %for.cond.cleanup, label %for.body @@ -47,6 +52,109 @@ for.body: ; preds = %entry, %for.body br i1 %exitcond.not, label %for.cond.cleanup, label %for.body } +define hidden i32 @i32_mac_u8_s8(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %N) { +; CHECK-LABEL: i32_mac_u8_s8: +; CHECK: loop +; CHECK: v128.load32_zero +; CHECK: i16x8.extend_low_i8x16_u +; CHECK: i32x4.extend_low_i16x8_u +; CHECK: v128.load32_zero +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: i32x4.mul +; CHECK: i32x4.add + +; MAX-BANDWIDTH: loop +; MAX-BANDWIDTH: v128.load +; MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_u +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; MAX-BANDWIDTH: v128.load +; MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; MAX-BANDWIDTH: i32x4.mul +; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_u +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; MAX-BANDWIDTH: i32x4.mul +; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_u +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; MAX-BANDWIDTH: i32x4.mul +; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_u +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; MAX-BANDWIDTH: i32x4.mul +; MAX-BANDWIDTH: i32x4.add + +; RELAXED-MAX-BANDWIDTH: loop +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; RELAXED-MAX-BANDWIDTH: i16x8.extend_low_i8x16_u +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; RELAXED-MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; RELAXED-MAX-BANDWIDTH: i16x8.extend_low_i8x16_u +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; RELAXED-MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; RELAXED-MAX-BANDWIDTH: i16x8.extend_low_i8x16_u +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; RELAXED-MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: 
i16x8.extend_low_i8x16_u +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i16x8.extend_low_i8x16_s +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i32x4.add +entry: + %cmp7.not = icmp eq i32 %N, 0 + br i1 %cmp7.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + %res.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ] + ret i32 %res.0.lcssa + +for.body: ; preds = %entry, %for.body + %i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %res.08 = phi i32 [ %add, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i8, ptr %a, i32 %i.09 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sext i8 %0 to i32 + %arrayidx1 = getelementptr inbounds i8, ptr %b, i32 %i.09 + %1 = load i8, ptr %arrayidx1, align 1 + %conv2 = zext i8 %1 to i32 + %mul = mul nsw i32 %conv2, %conv + %add = add nsw i32 %mul, %res.08 + %inc = add nuw i32 %i.09, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + define hidden i32 @i32_mac_s16(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %N) { ; CHECK-LABEL: i32_mac_s16: ; CHECK: i32x4.load16x4_s 0:p2align=1 @@ -57,6 +165,12 @@ define hidden i32 @i32_mac_s16(ptr nocapture noundef readonly %a, ptr nocapture ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.dot_i16x8_s +; MAX-BANDWIDTH: i32x4.add + +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i32x4.dot_i16x8_s +; RELAXED-MAX-BANDWIDTH: i32x4.add entry: %cmp7.not = icmp eq i32 %N, 0 @@ -116,6 +230,31 @@ define hidden i64 @i64_mac_s16(ptr nocapture noundef readonly %a, ptr nocapture ; MAX-BANDWIDTH: i64x2.extmul_low_i32x4_s ; MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_s +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_s +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_s +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_s +; RELAXED-MAX-BANDWIDTH: i64x2.add + entry: %cmp7.not = icmp eq i32 %N, 0 br i1 %cmp7.not, label %for.cond.cleanup, label %for.body @@ -156,6 +295,14 @@ define hidden i64 @i64_mac_s32(ptr nocapture noundef readonly %a, ptr nocapture ; MAX-BANDWIDTH: i64x2.extend_low_i32x4_s ; MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: 
v128.load +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i64x2.extend_high_i32x4_s +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i64x2.extend_low_i32x4_s +; RELAXED-MAX-BANDWIDTH: i64x2.add + entry: %cmp6.not = icmp eq i32 %N, 0 br i1 %cmp6.not, label %for.cond.cleanup, label %for.body @@ -197,6 +344,15 @@ define hidden i32 @i32_mac_u8(ptr nocapture noundef readonly %a, ptr nocapture n ; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i16x8.extmul_low_i8x16_u +; RELAXED-MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u +; RELAXED-MAX-BANDWIDTH: i16x8.extmul_high_i8x16_u +; RELAXED-MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u +; RELAXED-MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: i32x4.add + entry: %cmp7.not = icmp eq i32 %N, 0 br i1 %cmp7.not, label %for.cond.cleanup, label %for.body @@ -235,6 +391,13 @@ define hidden i32 @i32_mac_u16(ptr nocapture noundef readonly %a, ptr nocapture ; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i32x4.extmul_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i32x4.extmul_high_i16x8_u +; RELAXED-MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: i32x4.add + entry: %cmp7.not = icmp eq i32 %N, 0 br i1 %cmp7.not, label %for.cond.cleanup, label %for.body @@ -277,6 +440,17 @@ define hidden i32 @i32_mac_u16_s16(ptr nocapture noundef readonly %a, ptr nocapt ; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i32x4.extend_high_i16x8_s +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i32x4.extend_high_i16x8_u +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_s +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i32x4.add +; RELAXED-MAX-BANDWIDTH: i32x4.add + entry: %cmp7.not = icmp eq i32 %N, 0 br i1 %cmp7.not, label %for.cond.cleanup, label %for.body @@ -335,6 +509,32 @@ define hidden i64 @i64_mac_u16(ptr nocapture noundef readonly %a, ptr nocapture ; MAX-BANDWIDTH: i64x2.extmul_low_i32x4_u ; MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 12, 13, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_u +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 8, 9, 10, 11, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_u +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i8x16.shuffle 4, 5, 6, 7, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_u +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; RELAXED-MAX-BANDWIDTH: i32x4.extend_low_i16x8_u +; 
RELAXED-MAX-BANDWIDTH: i64x2.extmul_low_i32x4_u +; RELAXED-MAX-BANDWIDTH: i64x2.add + + entry: %cmp8.not = icmp eq i32 %N, 0 br i1 %cmp8.not, label %for.cond.cleanup, label %for.body @@ -375,6 +575,14 @@ define hidden i64 @i64_mac_u32(ptr nocapture noundef readonly %a, ptr nocapture ; MAX-BANDWIDTH: i64x2.extend_low_i32x4_u ; MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: v128.load +; RELAXED-MAX-BANDWIDTH: i32x4.mul +; RELAXED-MAX-BANDWIDTH: i64x2.extend_high_i32x4_u +; RELAXED-MAX-BANDWIDTH: i64x2.add +; RELAXED-MAX-BANDWIDTH: i64x2.extend_low_i32x4_u +; RELAXED-MAX-BANDWIDTH: i64x2.add + entry: %cmp6.not = icmp eq i32 %N, 0 br i1 %cmp6.not, label %for.cond.cleanup, label %for.body diff --git a/llvm/test/CodeGen/WebAssembly/mem-intrinsics-offsets.ll b/llvm/test/CodeGen/WebAssembly/mem-intrinsics-offsets.ll new file mode 100644 index 0000000..abbd953 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/mem-intrinsics-offsets.ll @@ -0,0 +1,48 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mcpu=mvp -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s + +; This test ensures that loads and stores generated for small memcpy et al use +; constant offset folding. + + +target triple = "wasm32-unknown-unknown" + +define void @call_memset(ptr) #0 { +; CHECK-LABEL: call_memset: +; CHECK: .functype call_memset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i64.const $push0=, 0 +; CHECK-NEXT: i64.store 8($0):p2align=0, $pop0 +; CHECK-NEXT: i64.const $push1=, 0 +; CHECK-NEXT: i64.store 0($0):p2align=0, $pop1 +; CHECK-NEXT: # fallthrough-return + call void @llvm.memset.p0.i32(ptr align 1 %0, i8 0, i32 16, i1 false) + ret void +} + +define void @call_memcpy(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: call_memcpy: +; CHECK: .functype call_memcpy (i32, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i64.load $push0=, 8($1):p2align=0 +; CHECK-NEXT: i64.store 8($0):p2align=0, $pop0 +; CHECK-NEXT: i64.load $push1=, 0($1):p2align=0 +; CHECK-NEXT: i64.store 0($0):p2align=0, $pop1 +; CHECK-NEXT: # fallthrough-return + call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %src, i32 16, i1 false) + ret void +} + + +define void @call_memmove(ptr %dst, ptr %src) #0 { +; CHECK-LABEL: call_memmove: +; CHECK: .functype call_memmove (i32, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i64.load $2=, 0($1):p2align=0 +; CHECK-NEXT: i64.load $push0=, 8($1):p2align=0 +; CHECK-NEXT: i64.store 8($0):p2align=0, $pop0 +; CHECK-NEXT: i64.store 0($0):p2align=0, $2 +; CHECK-NEXT: # fallthrough-return + call void @llvm.memmove.p0.p0.i32(ptr align 1 %dst, ptr align 1 %src, i32 16, i1 false) + ret void +} diff --git a/llvm/test/CodeGen/WebAssembly/simd-dot-reductions.ll b/llvm/test/CodeGen/WebAssembly/simd-dot-reductions.ll new file mode 100644 index 0000000..3654aae --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/simd-dot-reductions.ll @@ -0,0 +1,106 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mattr=+simd128 | FileCheck %s + +target triple = "wasm32-unknown-unknown" + +define <4 x i32> @dot_sext_1(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: dot_sext_1: +; CHECK: .functype dot_sext_1 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32x4.dot_i16x8_s +; CHECK-NEXT: # fallthrough-return + %sext1 = sext <8 x i16> %a to <8 x i32> + %sext2 = sext <8 x i16> %b to <8 x i32> + 
%mul = mul <8 x i32> %sext1, %sext2 + %shuffle1 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %shuffle2 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %res = add <4 x i32> %shuffle1, %shuffle2 + ret <4 x i32> %res +} + + +define <4 x i32> @dot_sext_2(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: dot_sext_2: +; CHECK: .functype dot_sext_2 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32x4.dot_i16x8_s +; CHECK-NEXT: # fallthrough-return + %sext1 = sext <8 x i16> %a to <8 x i32> + %sext2 = sext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %sext1, %sext2 + %shuffle1 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %shuffle2 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %res = add <4 x i32> %shuffle2, %shuffle1 + ret <4 x i32> %res +} + +define <4 x i32> @dot_sext_self(<8 x i16> %v) { +; CHECK-LABEL: dot_sext_self: +; CHECK: .functype dot_sext_self (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32x4.dot_i16x8_s +; CHECK-NEXT: # fallthrough-return + %sext = sext <8 x i16> %v to <8 x i32> + %mul = mul <8 x i32> %sext, %sext + %shuffle1 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %shuffle2 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %res = add <4 x i32> %shuffle1, %shuffle2 + ret <4 x i32> %res +} + +; INFO: Negative test +define <4 x i32> @dot_zext(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: dot_zext: +; CHECK: .functype dot_zext (v128, v128) -> (v128) +; CHECK-NEXT: .local v128 +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32x4.extmul_low_i16x8_u +; CHECK-NEXT: local.tee 2 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32x4.extmul_high_i16x8_u +; CHECK-NEXT: local.tee 1 +; CHECK-NEXT: i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK-NEXT: local.get 2 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i8x16.shuffle 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK-NEXT: i32x4.add +; CHECK-NEXT: # fallthrough-return + %zext1 = zext <8 x i16> %a to <8 x i32> + %zext2 = zext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %zext1, %zext2 + %shuffle1 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %shuffle2 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %res = add <4 x i32> %shuffle1, %shuffle2 + ret <4 x i32> %res +} + +; INFO: Negative test +define <4 x i32> @dot_wrong_shuffle(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: dot_wrong_shuffle: +; CHECK: .functype dot_wrong_shuffle (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32x4.extmul_low_i16x8_s +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32x4.extmul_high_i16x8_s +; CHECK-NEXT: i32x4.add +; CHECK-NEXT: # fallthrough-return + %sext1 = sext <8 x i16> %a to <8 x i32> + %sext2 = sext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %sext1, %sext2 + %shuffle1 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %shuffle2 = shufflevector <8 x i32> %mul, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %res = add 
<4 x i32> %shuffle1, %shuffle2 + ret <4 x i32> %res +} diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-dot.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-dot.ll new file mode 100644 index 0000000..9716cbe --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-dot.ll @@ -0,0 +1,104 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+fp16,+simd128,+relaxed-simd | FileCheck %s + +target triple = "wasm32" +; relaxed_dot stands for relaxed_dot_i8x16_i7x16_s, as in td +; relaxed_dot_add stands for i32x4.relaxed_dot_i8x16_i7x16_add_s, as in td + +define <8 x i16> @relaxed_dot_sext_1(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: relaxed_dot_sext_1: +; CHECK: .functype relaxed_dot_sext_1 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i16x8.relaxed_dot_i8x16_i7x16_s $push0=, $0, $1 +; CHECK-NEXT: return $pop0 + %sext1 = sext <16 x i8> %a to <16 x i16> + %sext2 = sext <16 x i8> %b to <16 x i16> + %mul = mul <16 x i16> %sext1, %sext2 + %shuffle1 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %shuffle2 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + %res = add <8 x i16> %shuffle1, %shuffle2 + ret <8 x i16> %res +} + + +define <8 x i16> @relaxed_dot_sext_2(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: relaxed_dot_sext_2: +; CHECK: .functype relaxed_dot_sext_2 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i16x8.relaxed_dot_i8x16_i7x16_s $push0=, $0, $1 +; CHECK-NEXT: return $pop0 + %sext1 = sext <16 x i8> %a to <16 x i16> + %sext2 = sext <16 x i8> %b to <16 x i16> + %mul = mul <16 x i16> %sext1, %sext2 + %shuffle1 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %shuffle2 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + %res = add <8 x i16> %shuffle2, %shuffle1 + ret <8 x i16> %res +} + +define <8 x i16> @relaxed_dot_sext_self(<16 x i8> %v) { +; CHECK-LABEL: relaxed_dot_sext_self: +; CHECK: .functype relaxed_dot_sext_self (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i16x8.relaxed_dot_i8x16_i7x16_s $push0=, $0, $0 +; CHECK-NEXT: return $pop0 + %sext = sext <16 x i8> %v to <16 x i16> + %mul = mul <16 x i16> %sext, %sext + %shuffle1 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %shuffle2 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + %res = add <8 x i16> %shuffle1, %shuffle2 + ret <8 x i16> %res +} + +define <4 x i32> @relaxed_dot_add_from_relaxed_dot(<16 x i8> %a, <16 x i8> %b, <4 x i32> %c) { +; CHECK-LABEL: relaxed_dot_add_from_relaxed_dot: +; CHECK: .functype relaxed_dot_add_from_relaxed_dot (v128, v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32x4.relaxed_dot_i8x16_i7x16_add_s $push0=, $0, $1, $2 +; CHECK-NEXT: return $pop0 + %relaxed_dot_call = call <8 x i16> @llvm.wasm.relaxed.dot.i8x16.i7x16.signed(<16 x i8> %a, <16 x i8> %b) + %sext = call <4 x i32> @llvm.wasm.extadd.pairwise.signed.v4i32(<8 x i16> %relaxed_dot_call) + %res = add <4 x i32> %sext, %c + ret <4 x i32> %res +} + +; INFO: Negative test +define <8 x 
i16> @relaxed_dot_zext(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: relaxed_dot_zext: +; CHECK: .functype relaxed_dot_zext (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i16x8.extmul_low_i8x16_u $push6=, $0, $1 +; CHECK-NEXT: local.tee $push5=, $2=, $pop6 +; CHECK-NEXT: i16x8.extmul_high_i8x16_u $push4=, $0, $1 +; CHECK-NEXT: local.tee $push3=, $1=, $pop4 +; CHECK-NEXT: i8x16.shuffle $push1=, $pop5, $pop3, 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 +; CHECK-NEXT: i8x16.shuffle $push0=, $2, $1, 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 +; CHECK-NEXT: i16x8.add $push2=, $pop1, $pop0 +; CHECK-NEXT: return $pop2 + %zext1 = zext <16 x i8> %a to <16 x i16> + %zext2 = zext <16 x i8> %b to <16 x i16> + %mul = mul <16 x i16> %zext1, %zext2 + %shuffle1 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %shuffle2 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + %res = add <8 x i16> %shuffle1, %shuffle2 + ret <8 x i16> %res + +} + +; INFO: Negative test +define <8 x i16> @relaxed_dot_wrong_shuffle(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: relaxed_dot_wrong_shuffle: +; CHECK: .functype relaxed_dot_wrong_shuffle (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i16x8.extmul_low_i8x16_s $push1=, $0, $1 +; CHECK-NEXT: i16x8.extmul_high_i8x16_s $push0=, $0, $1 +; CHECK-NEXT: i16x8.add $push2=, $pop1, $pop0 +; CHECK-NEXT: return $pop2 + %sext1 = sext <16 x i8> %a to <16 x i16> + %sext2 = sext <16 x i8> %b to <16 x i16> + %mul = mul <16 x i16> %sext1, %sext2 + %shuffle1 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %shuffle2 = shufflevector <16 x i16> %mul, <16 x i16> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %res = add <8 x i16> %shuffle1, %shuffle2 + ret <8 x i16> %res +} diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fma.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fma.ll index e065de3..600241a 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fma.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fma.ll @@ -2,9 +2,278 @@ ; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+fp16,+simd128,+relaxed-simd | FileCheck %s --check-prefix=RELAXED ; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+fp16,+simd128, | FileCheck %s --check-prefix=STRICT +; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s --check-prefix=NOFP16 +; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s --check-prefix=NOSIMD target triple = "wasm32" +define half @fadd_fmul_contract_f16(half %a, half %b, half %c) { +; RELAXED-LABEL: fadd_fmul_contract_f16: +; RELAXED: .functype fadd_fmul_contract_f16 (f32, f32, f32) -> (f32) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: call $push0=, __truncsfhf2, $0 +; RELAXED-NEXT: call $push1=, __extendhfsf2, $pop0 +; RELAXED-NEXT: call $push2=, __truncsfhf2, $1 +; RELAXED-NEXT: call $push3=, __extendhfsf2, $pop2 +; RELAXED-NEXT: f32.mul $push4=, $pop1, $pop3 +; RELAXED-NEXT: call $push5=, __truncsfhf2, $2 +; RELAXED-NEXT: call $push6=, __extendhfsf2, $pop5 +; RELAXED-NEXT: f32.add $push7=, $pop4, $pop6 +; 
RELAXED-NEXT: return $pop7 +; +; STRICT-LABEL: fadd_fmul_contract_f16: +; STRICT: .functype fadd_fmul_contract_f16 (f32, f32, f32) -> (f32) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: call $push0=, __truncsfhf2, $0 +; STRICT-NEXT: call $push1=, __extendhfsf2, $pop0 +; STRICT-NEXT: call $push2=, __truncsfhf2, $1 +; STRICT-NEXT: call $push3=, __extendhfsf2, $pop2 +; STRICT-NEXT: f32.mul $push4=, $pop1, $pop3 +; STRICT-NEXT: call $push5=, __truncsfhf2, $2 +; STRICT-NEXT: call $push6=, __extendhfsf2, $pop5 +; STRICT-NEXT: f32.add $push7=, $pop4, $pop6 +; STRICT-NEXT: return $pop7 +; +; NOFP16-LABEL: fadd_fmul_contract_f16: +; NOFP16: .functype fadd_fmul_contract_f16 (f32, f32, f32) -> (f32) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, __truncsfhf2, $0 +; NOFP16-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOFP16-NEXT: call $push2=, __truncsfhf2, $1 +; NOFP16-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOFP16-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOFP16-NEXT: call $push5=, __truncsfhf2, $2 +; NOFP16-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOFP16-NEXT: f32.add $push7=, $pop4, $pop6 +; NOFP16-NEXT: return $pop7 +; +; NOSIMD-LABEL: fadd_fmul_contract_f16: +; NOSIMD: .functype fadd_fmul_contract_f16 (f32, f32, f32) -> (f32) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, __truncsfhf2, $0 +; NOSIMD-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOSIMD-NEXT: call $push2=, __truncsfhf2, $1 +; NOSIMD-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOSIMD-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOSIMD-NEXT: call $push5=, __truncsfhf2, $2 +; NOSIMD-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOSIMD-NEXT: f32.add $push7=, $pop4, $pop6 +; NOSIMD-NEXT: return $pop7 + %mul = fmul contract half %b, %a + %add = fadd contract half %mul, %c + ret half %add +} + +define half @fmuladd_contract_f16(half %a, half %b, half %c) { +; RELAXED-LABEL: fmuladd_contract_f16: +; RELAXED: .functype fmuladd_contract_f16 (f32, f32, f32) -> (f32) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: call $push0=, __truncsfhf2, $1 +; RELAXED-NEXT: call $push1=, __extendhfsf2, $pop0 +; RELAXED-NEXT: call $push2=, __truncsfhf2, $0 +; RELAXED-NEXT: call $push3=, __extendhfsf2, $pop2 +; RELAXED-NEXT: f32.mul $push4=, $pop1, $pop3 +; RELAXED-NEXT: call $push5=, __truncsfhf2, $2 +; RELAXED-NEXT: call $push6=, __extendhfsf2, $pop5 +; RELAXED-NEXT: f32.add $push7=, $pop4, $pop6 +; RELAXED-NEXT: return $pop7 +; +; STRICT-LABEL: fmuladd_contract_f16: +; STRICT: .functype fmuladd_contract_f16 (f32, f32, f32) -> (f32) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: call $push0=, __truncsfhf2, $1 +; STRICT-NEXT: call $push1=, __extendhfsf2, $pop0 +; STRICT-NEXT: call $push2=, __truncsfhf2, $0 +; STRICT-NEXT: call $push3=, __extendhfsf2, $pop2 +; STRICT-NEXT: f32.mul $push4=, $pop1, $pop3 +; STRICT-NEXT: call $push5=, __truncsfhf2, $2 +; STRICT-NEXT: call $push6=, __extendhfsf2, $pop5 +; STRICT-NEXT: f32.add $push7=, $pop4, $pop6 +; STRICT-NEXT: return $pop7 +; +; NOFP16-LABEL: fmuladd_contract_f16: +; NOFP16: .functype fmuladd_contract_f16 (f32, f32, f32) -> (f32) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, __truncsfhf2, $1 +; NOFP16-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOFP16-NEXT: call $push2=, __truncsfhf2, $0 +; NOFP16-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOFP16-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOFP16-NEXT: call $push5=, __truncsfhf2, $2 +; NOFP16-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOFP16-NEXT: f32.add $push7=, $pop4, $pop6 +; NOFP16-NEXT: return $pop7 +; +; NOSIMD-LABEL: fmuladd_contract_f16: +; NOSIMD: 
.functype fmuladd_contract_f16 (f32, f32, f32) -> (f32) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, __truncsfhf2, $1 +; NOSIMD-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOSIMD-NEXT: call $push2=, __truncsfhf2, $0 +; NOSIMD-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOSIMD-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOSIMD-NEXT: call $push5=, __truncsfhf2, $2 +; NOSIMD-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOSIMD-NEXT: f32.add $push7=, $pop4, $pop6 +; NOSIMD-NEXT: return $pop7 + %fma = call contract half @llvm.fmuladd(half %a, half %b, half %c) + ret half %fma +} + +define half @fmuladd_f16(half %a, half %b, half %c) { +; RELAXED-LABEL: fmuladd_f16: +; RELAXED: .functype fmuladd_f16 (f32, f32, f32) -> (f32) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: call $push0=, __truncsfhf2, $1 +; RELAXED-NEXT: call $push1=, __extendhfsf2, $pop0 +; RELAXED-NEXT: call $push2=, __truncsfhf2, $0 +; RELAXED-NEXT: call $push3=, __extendhfsf2, $pop2 +; RELAXED-NEXT: f32.mul $push4=, $pop1, $pop3 +; RELAXED-NEXT: call $push5=, __truncsfhf2, $2 +; RELAXED-NEXT: call $push6=, __extendhfsf2, $pop5 +; RELAXED-NEXT: f32.add $push7=, $pop4, $pop6 +; RELAXED-NEXT: return $pop7 +; +; STRICT-LABEL: fmuladd_f16: +; STRICT: .functype fmuladd_f16 (f32, f32, f32) -> (f32) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: call $push0=, __truncsfhf2, $1 +; STRICT-NEXT: call $push1=, __extendhfsf2, $pop0 +; STRICT-NEXT: call $push2=, __truncsfhf2, $0 +; STRICT-NEXT: call $push3=, __extendhfsf2, $pop2 +; STRICT-NEXT: f32.mul $push4=, $pop1, $pop3 +; STRICT-NEXT: call $push5=, __truncsfhf2, $2 +; STRICT-NEXT: call $push6=, __extendhfsf2, $pop5 +; STRICT-NEXT: f32.add $push7=, $pop4, $pop6 +; STRICT-NEXT: return $pop7 +; +; NOFP16-LABEL: fmuladd_f16: +; NOFP16: .functype fmuladd_f16 (f32, f32, f32) -> (f32) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, __truncsfhf2, $1 +; NOFP16-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOFP16-NEXT: call $push2=, __truncsfhf2, $0 +; NOFP16-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOFP16-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOFP16-NEXT: call $push5=, __truncsfhf2, $2 +; NOFP16-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOFP16-NEXT: f32.add $push7=, $pop4, $pop6 +; NOFP16-NEXT: return $pop7 +; +; NOSIMD-LABEL: fmuladd_f16: +; NOSIMD: .functype fmuladd_f16 (f32, f32, f32) -> (f32) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, __truncsfhf2, $1 +; NOSIMD-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOSIMD-NEXT: call $push2=, __truncsfhf2, $0 +; NOSIMD-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOSIMD-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOSIMD-NEXT: call $push5=, __truncsfhf2, $2 +; NOSIMD-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOSIMD-NEXT: f32.add $push7=, $pop4, $pop6 +; NOSIMD-NEXT: return $pop7 + %fma = call half @llvm.fmuladd(half %a, half %b, half %c) + ret half %fma +} + + +define float @fadd_fmul_contract_f32(float %a, float %b, float %c) { +; RELAXED-LABEL: fadd_fmul_contract_f32: +; RELAXED: .functype fadd_fmul_contract_f32 (f32, f32, f32) -> (f32) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f32.mul $push0=, $1, $0 +; RELAXED-NEXT: f32.add $push1=, $pop0, $2 +; RELAXED-NEXT: return $pop1 +; +; STRICT-LABEL: fadd_fmul_contract_f32: +; STRICT: .functype fadd_fmul_contract_f32 (f32, f32, f32) -> (f32) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f32.mul $push0=, $1, $0 +; STRICT-NEXT: f32.add $push1=, $pop0, $2 +; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fadd_fmul_contract_f32: +; NOFP16: .functype fadd_fmul_contract_f32 (f32, f32, f32) -> (f32) +; 
NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32.mul $push0=, $1, $0 +; NOFP16-NEXT: f32.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fadd_fmul_contract_f32: +; NOSIMD: .functype fadd_fmul_contract_f32 (f32, f32, f32) -> (f32) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $1, $0 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $2 +; NOSIMD-NEXT: return $pop1 + %mul = fmul contract float %b, %a + %add = fadd contract float %mul, %c + ret float %add +} + +define float @fmuladd_contract_f32(float %a, float %b, float %c) { +; RELAXED-LABEL: fmuladd_contract_f32: +; RELAXED: .functype fmuladd_contract_f32 (f32, f32, f32) -> (f32) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f32.mul $push0=, $0, $1 +; RELAXED-NEXT: f32.add $push1=, $pop0, $2 +; RELAXED-NEXT: return $pop1 +; +; STRICT-LABEL: fmuladd_contract_f32: +; STRICT: .functype fmuladd_contract_f32 (f32, f32, f32) -> (f32) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f32.mul $push0=, $0, $1 +; STRICT-NEXT: f32.add $push1=, $pop0, $2 +; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_contract_f32: +; NOFP16: .functype fmuladd_contract_f32 (f32, f32, f32) -> (f32) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32.mul $push0=, $0, $1 +; NOFP16-NEXT: f32.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_contract_f32: +; NOSIMD: .functype fmuladd_contract_f32 (f32, f32, f32) -> (f32) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $0, $1 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $2 +; NOSIMD-NEXT: return $pop1 + %fma = call contract float @llvm.fmuladd(float %a, float %b, float %c) + ret float %fma +} + +define float @fmuladd_f32(float %a, float %b, float %c) { +; RELAXED-LABEL: fmuladd_f32: +; RELAXED: .functype fmuladd_f32 (f32, f32, f32) -> (f32) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f32.mul $push0=, $0, $1 +; RELAXED-NEXT: f32.add $push1=, $pop0, $2 +; RELAXED-NEXT: return $pop1 +; +; STRICT-LABEL: fmuladd_f32: +; STRICT: .functype fmuladd_f32 (f32, f32, f32) -> (f32) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f32.mul $push0=, $0, $1 +; STRICT-NEXT: f32.add $push1=, $pop0, $2 +; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_f32: +; NOFP16: .functype fmuladd_f32 (f32, f32, f32) -> (f32) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32.mul $push0=, $0, $1 +; NOFP16-NEXT: f32.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_f32: +; NOSIMD: .functype fmuladd_f32 (f32, f32, f32) -> (f32) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $0, $1 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $2 +; NOSIMD-NEXT: return $pop1 + %fma = call float @llvm.fmuladd(float %a, float %b, float %c) + ret float %fma +} + define double @fadd_fmul_contract_f64(double %a, double %b, double %c) { ; RELAXED-LABEL: fadd_fmul_contract_f64: ; RELAXED: .functype fadd_fmul_contract_f64 (f64, f64, f64) -> (f64) @@ -19,16 +288,94 @@ define double @fadd_fmul_contract_f64(double %a, double %b, double %c) { ; STRICT-NEXT: f64.mul $push0=, $1, $0 ; STRICT-NEXT: f64.add $push1=, $pop0, $2 ; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fadd_fmul_contract_f64: +; NOFP16: .functype fadd_fmul_contract_f64 (f64, f64, f64) -> (f64) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f64.mul $push0=, $1, $0 +; NOFP16-NEXT: f64.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fadd_fmul_contract_f64: +; NOSIMD: .functype fadd_fmul_contract_f64 (f64, f64, f64) -> (f64) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f64.mul $push0=, $1, $0 +; NOSIMD-NEXT: f64.add $push1=, $pop0, $2 +; 
NOSIMD-NEXT: return $pop1 %mul = fmul contract double %b, %a %add = fadd contract double %mul, %c ret double %add } +define double @fmuladd_f64(double %a, double %b, double %c) { +; RELAXED-LABEL: fmuladd_f64: +; RELAXED: .functype fmuladd_f64 (f64, f64, f64) -> (f64) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f64.mul $push0=, $0, $1 +; RELAXED-NEXT: f64.add $push1=, $pop0, $2 +; RELAXED-NEXT: return $pop1 +; +; STRICT-LABEL: fmuladd_f64: +; STRICT: .functype fmuladd_f64 (f64, f64, f64) -> (f64) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f64.mul $push0=, $0, $1 +; STRICT-NEXT: f64.add $push1=, $pop0, $2 +; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_f64: +; NOFP16: .functype fmuladd_f64 (f64, f64, f64) -> (f64) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f64.mul $push0=, $0, $1 +; NOFP16-NEXT: f64.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_f64: +; NOSIMD: .functype fmuladd_f64 (f64, f64, f64) -> (f64) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f64.mul $push0=, $0, $1 +; NOSIMD-NEXT: f64.add $push1=, $pop0, $2 +; NOSIMD-NEXT: return $pop1 + %fma = call double @llvm.fmuladd(double %a, double %b, double %c) + ret double %fma +} + +define double @fmuladd_contract_f64(double %a, double %b, double %c) { +; RELAXED-LABEL: fmuladd_contract_f64: +; RELAXED: .functype fmuladd_contract_f64 (f64, f64, f64) -> (f64) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f64.mul $push0=, $0, $1 +; RELAXED-NEXT: f64.add $push1=, $pop0, $2 +; RELAXED-NEXT: return $pop1 +; +; STRICT-LABEL: fmuladd_contract_f64: +; STRICT: .functype fmuladd_contract_f64 (f64, f64, f64) -> (f64) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f64.mul $push0=, $0, $1 +; STRICT-NEXT: f64.add $push1=, $pop0, $2 +; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_contract_f64: +; NOFP16: .functype fmuladd_contract_f64 (f64, f64, f64) -> (f64) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f64.mul $push0=, $0, $1 +; NOFP16-NEXT: f64.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_contract_f64: +; NOSIMD: .functype fmuladd_contract_f64 (f64, f64, f64) -> (f64) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f64.mul $push0=, $0, $1 +; NOSIMD-NEXT: f64.add $push1=, $pop0, $2 +; NOSIMD-NEXT: return $pop1 + %fma = call contract double @llvm.fmuladd(double %a, double %b, double %c) + ret double %fma +} + define <4 x float> @fadd_fmul_contract_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; RELAXED-LABEL: fadd_fmul_contract_4xf32: ; RELAXED: .functype fadd_fmul_contract_4xf32 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f32x4.relaxed_madd $push0=, $2, $1, $0 +; RELAXED-NEXT: f32x4.relaxed_madd $push0=, $1, $0, $2 ; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fadd_fmul_contract_4xf32: @@ -37,31 +384,222 @@ define <4 x float> @fadd_fmul_contract_4xf32(<4 x float> %a, <4 x float> %b, <4 ; STRICT-NEXT: f32x4.mul $push0=, $1, $0 ; STRICT-NEXT: f32x4.add $push1=, $pop0, $2 ; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fadd_fmul_contract_4xf32: +; NOFP16: .functype fadd_fmul_contract_4xf32 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32x4.mul $push0=, $1, $0 +; NOFP16-NEXT: f32x4.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fadd_fmul_contract_4xf32: +; NOSIMD: .functype fadd_fmul_contract_4xf32 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $8, $4 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $12 +; NOSIMD-NEXT: f32.store 12($0), 
$pop1 +; NOSIMD-NEXT: f32.mul $push2=, $7, $3 +; NOSIMD-NEXT: f32.add $push3=, $pop2, $11 +; NOSIMD-NEXT: f32.store 8($0), $pop3 +; NOSIMD-NEXT: f32.mul $push4=, $6, $2 +; NOSIMD-NEXT: f32.add $push5=, $pop4, $10 +; NOSIMD-NEXT: f32.store 4($0), $pop5 +; NOSIMD-NEXT: f32.mul $push6=, $5, $1 +; NOSIMD-NEXT: f32.add $push7=, $pop6, $9 +; NOSIMD-NEXT: f32.store 0($0), $pop7 +; NOSIMD-NEXT: return %mul = fmul contract <4 x float> %b, %a %add = fadd contract <4 x float> %mul, %c ret <4 x float> %add } - define <8 x half> @fadd_fmul_contract_8xf16(<8 x half> %a, <8 x half> %b, <8 x half> %c) { ; RELAXED-LABEL: fadd_fmul_contract_8xf16: ; RELAXED: .functype fadd_fmul_contract_8xf16 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f16x8.relaxed_madd $push0=, $2, $1, $0 +; RELAXED-NEXT: f16x8.madd $push0=, $1, $0, $2 ; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fadd_fmul_contract_8xf16: ; STRICT: .functype fadd_fmul_contract_8xf16 (v128, v128, v128) -> (v128) ; STRICT-NEXT: # %bb.0: -; STRICT-NEXT: f16x8.mul $push0=, $1, $0 -; STRICT-NEXT: f16x8.add $push1=, $pop0, $2 -; STRICT-NEXT: return $pop1 +; STRICT-NEXT: f16x8.madd $push0=, $1, $0, $2 +; STRICT-NEXT: return $pop0 +; +; NOFP16-LABEL: fadd_fmul_contract_8xf16: +; NOFP16: .functype fadd_fmul_contract_8xf16 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, __truncsfhf2, $8 +; NOFP16-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOFP16-NEXT: call $push2=, __truncsfhf2, $16 +; NOFP16-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOFP16-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOFP16-NEXT: call $push5=, __truncsfhf2, $24 +; NOFP16-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOFP16-NEXT: f32.add $push7=, $pop4, $pop6 +; NOFP16-NEXT: call $push8=, __truncsfhf2, $pop7 +; NOFP16-NEXT: i32.store16 14($0), $pop8 +; NOFP16-NEXT: call $push9=, __truncsfhf2, $7 +; NOFP16-NEXT: call $push10=, __extendhfsf2, $pop9 +; NOFP16-NEXT: call $push11=, __truncsfhf2, $15 +; NOFP16-NEXT: call $push12=, __extendhfsf2, $pop11 +; NOFP16-NEXT: f32.mul $push13=, $pop10, $pop12 +; NOFP16-NEXT: call $push14=, __truncsfhf2, $23 +; NOFP16-NEXT: call $push15=, __extendhfsf2, $pop14 +; NOFP16-NEXT: f32.add $push16=, $pop13, $pop15 +; NOFP16-NEXT: call $push17=, __truncsfhf2, $pop16 +; NOFP16-NEXT: i32.store16 12($0), $pop17 +; NOFP16-NEXT: call $push18=, __truncsfhf2, $6 +; NOFP16-NEXT: call $push19=, __extendhfsf2, $pop18 +; NOFP16-NEXT: call $push20=, __truncsfhf2, $14 +; NOFP16-NEXT: call $push21=, __extendhfsf2, $pop20 +; NOFP16-NEXT: f32.mul $push22=, $pop19, $pop21 +; NOFP16-NEXT: call $push23=, __truncsfhf2, $22 +; NOFP16-NEXT: call $push24=, __extendhfsf2, $pop23 +; NOFP16-NEXT: f32.add $push25=, $pop22, $pop24 +; NOFP16-NEXT: call $push26=, __truncsfhf2, $pop25 +; NOFP16-NEXT: i32.store16 10($0), $pop26 +; NOFP16-NEXT: call $push27=, __truncsfhf2, $5 +; NOFP16-NEXT: call $push28=, __extendhfsf2, $pop27 +; NOFP16-NEXT: call $push29=, __truncsfhf2, $13 +; NOFP16-NEXT: call $push30=, __extendhfsf2, $pop29 +; NOFP16-NEXT: f32.mul $push31=, $pop28, $pop30 +; NOFP16-NEXT: call $push32=, __truncsfhf2, $21 +; NOFP16-NEXT: call $push33=, __extendhfsf2, $pop32 +; NOFP16-NEXT: f32.add $push34=, $pop31, $pop33 +; NOFP16-NEXT: call $push35=, __truncsfhf2, $pop34 +; NOFP16-NEXT: i32.store16 8($0), $pop35 +; NOFP16-NEXT: call $push36=, __truncsfhf2, $4 +; NOFP16-NEXT: call $push37=, __extendhfsf2, $pop36 +; NOFP16-NEXT: call 
$push38=, __truncsfhf2, $12 +; NOFP16-NEXT: call $push39=, __extendhfsf2, $pop38 +; NOFP16-NEXT: f32.mul $push40=, $pop37, $pop39 +; NOFP16-NEXT: call $push41=, __truncsfhf2, $20 +; NOFP16-NEXT: call $push42=, __extendhfsf2, $pop41 +; NOFP16-NEXT: f32.add $push43=, $pop40, $pop42 +; NOFP16-NEXT: call $push44=, __truncsfhf2, $pop43 +; NOFP16-NEXT: i32.store16 6($0), $pop44 +; NOFP16-NEXT: call $push45=, __truncsfhf2, $3 +; NOFP16-NEXT: call $push46=, __extendhfsf2, $pop45 +; NOFP16-NEXT: call $push47=, __truncsfhf2, $11 +; NOFP16-NEXT: call $push48=, __extendhfsf2, $pop47 +; NOFP16-NEXT: f32.mul $push49=, $pop46, $pop48 +; NOFP16-NEXT: call $push50=, __truncsfhf2, $19 +; NOFP16-NEXT: call $push51=, __extendhfsf2, $pop50 +; NOFP16-NEXT: f32.add $push52=, $pop49, $pop51 +; NOFP16-NEXT: call $push53=, __truncsfhf2, $pop52 +; NOFP16-NEXT: i32.store16 4($0), $pop53 +; NOFP16-NEXT: call $push54=, __truncsfhf2, $2 +; NOFP16-NEXT: call $push55=, __extendhfsf2, $pop54 +; NOFP16-NEXT: call $push56=, __truncsfhf2, $10 +; NOFP16-NEXT: call $push57=, __extendhfsf2, $pop56 +; NOFP16-NEXT: f32.mul $push58=, $pop55, $pop57 +; NOFP16-NEXT: call $push59=, __truncsfhf2, $18 +; NOFP16-NEXT: call $push60=, __extendhfsf2, $pop59 +; NOFP16-NEXT: f32.add $push61=, $pop58, $pop60 +; NOFP16-NEXT: call $push62=, __truncsfhf2, $pop61 +; NOFP16-NEXT: i32.store16 2($0), $pop62 +; NOFP16-NEXT: call $push63=, __truncsfhf2, $1 +; NOFP16-NEXT: call $push64=, __extendhfsf2, $pop63 +; NOFP16-NEXT: call $push65=, __truncsfhf2, $9 +; NOFP16-NEXT: call $push66=, __extendhfsf2, $pop65 +; NOFP16-NEXT: f32.mul $push67=, $pop64, $pop66 +; NOFP16-NEXT: call $push68=, __truncsfhf2, $17 +; NOFP16-NEXT: call $push69=, __extendhfsf2, $pop68 +; NOFP16-NEXT: f32.add $push70=, $pop67, $pop69 +; NOFP16-NEXT: call $push71=, __truncsfhf2, $pop70 +; NOFP16-NEXT: i32.store16 0($0), $pop71 +; NOFP16-NEXT: return +; +; NOSIMD-LABEL: fadd_fmul_contract_8xf16: +; NOSIMD: .functype fadd_fmul_contract_8xf16 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, __truncsfhf2, $8 +; NOSIMD-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOSIMD-NEXT: call $push2=, __truncsfhf2, $16 +; NOSIMD-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOSIMD-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOSIMD-NEXT: call $push5=, __truncsfhf2, $24 +; NOSIMD-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOSIMD-NEXT: f32.add $push7=, $pop4, $pop6 +; NOSIMD-NEXT: call $push8=, __truncsfhf2, $pop7 +; NOSIMD-NEXT: i32.store16 14($0), $pop8 +; NOSIMD-NEXT: call $push9=, __truncsfhf2, $7 +; NOSIMD-NEXT: call $push10=, __extendhfsf2, $pop9 +; NOSIMD-NEXT: call $push11=, __truncsfhf2, $15 +; NOSIMD-NEXT: call $push12=, __extendhfsf2, $pop11 +; NOSIMD-NEXT: f32.mul $push13=, $pop10, $pop12 +; NOSIMD-NEXT: call $push14=, __truncsfhf2, $23 +; NOSIMD-NEXT: call $push15=, __extendhfsf2, $pop14 +; NOSIMD-NEXT: f32.add $push16=, $pop13, $pop15 +; NOSIMD-NEXT: call $push17=, __truncsfhf2, $pop16 +; NOSIMD-NEXT: i32.store16 12($0), $pop17 +; NOSIMD-NEXT: call $push18=, __truncsfhf2, $6 +; NOSIMD-NEXT: call $push19=, __extendhfsf2, $pop18 +; NOSIMD-NEXT: call $push20=, __truncsfhf2, $14 +; NOSIMD-NEXT: call $push21=, __extendhfsf2, $pop20 +; NOSIMD-NEXT: f32.mul $push22=, $pop19, $pop21 +; NOSIMD-NEXT: call $push23=, __truncsfhf2, $22 +; NOSIMD-NEXT: call $push24=, __extendhfsf2, $pop23 +; NOSIMD-NEXT: f32.add $push25=, $pop22, $pop24 +; NOSIMD-NEXT: call $push26=, 
__truncsfhf2, $pop25 +; NOSIMD-NEXT: i32.store16 10($0), $pop26 +; NOSIMD-NEXT: call $push27=, __truncsfhf2, $5 +; NOSIMD-NEXT: call $push28=, __extendhfsf2, $pop27 +; NOSIMD-NEXT: call $push29=, __truncsfhf2, $13 +; NOSIMD-NEXT: call $push30=, __extendhfsf2, $pop29 +; NOSIMD-NEXT: f32.mul $push31=, $pop28, $pop30 +; NOSIMD-NEXT: call $push32=, __truncsfhf2, $21 +; NOSIMD-NEXT: call $push33=, __extendhfsf2, $pop32 +; NOSIMD-NEXT: f32.add $push34=, $pop31, $pop33 +; NOSIMD-NEXT: call $push35=, __truncsfhf2, $pop34 +; NOSIMD-NEXT: i32.store16 8($0), $pop35 +; NOSIMD-NEXT: call $push36=, __truncsfhf2, $4 +; NOSIMD-NEXT: call $push37=, __extendhfsf2, $pop36 +; NOSIMD-NEXT: call $push38=, __truncsfhf2, $12 +; NOSIMD-NEXT: call $push39=, __extendhfsf2, $pop38 +; NOSIMD-NEXT: f32.mul $push40=, $pop37, $pop39 +; NOSIMD-NEXT: call $push41=, __truncsfhf2, $20 +; NOSIMD-NEXT: call $push42=, __extendhfsf2, $pop41 +; NOSIMD-NEXT: f32.add $push43=, $pop40, $pop42 +; NOSIMD-NEXT: call $push44=, __truncsfhf2, $pop43 +; NOSIMD-NEXT: i32.store16 6($0), $pop44 +; NOSIMD-NEXT: call $push45=, __truncsfhf2, $3 +; NOSIMD-NEXT: call $push46=, __extendhfsf2, $pop45 +; NOSIMD-NEXT: call $push47=, __truncsfhf2, $11 +; NOSIMD-NEXT: call $push48=, __extendhfsf2, $pop47 +; NOSIMD-NEXT: f32.mul $push49=, $pop46, $pop48 +; NOSIMD-NEXT: call $push50=, __truncsfhf2, $19 +; NOSIMD-NEXT: call $push51=, __extendhfsf2, $pop50 +; NOSIMD-NEXT: f32.add $push52=, $pop49, $pop51 +; NOSIMD-NEXT: call $push53=, __truncsfhf2, $pop52 +; NOSIMD-NEXT: i32.store16 4($0), $pop53 +; NOSIMD-NEXT: call $push54=, __truncsfhf2, $2 +; NOSIMD-NEXT: call $push55=, __extendhfsf2, $pop54 +; NOSIMD-NEXT: call $push56=, __truncsfhf2, $10 +; NOSIMD-NEXT: call $push57=, __extendhfsf2, $pop56 +; NOSIMD-NEXT: f32.mul $push58=, $pop55, $pop57 +; NOSIMD-NEXT: call $push59=, __truncsfhf2, $18 +; NOSIMD-NEXT: call $push60=, __extendhfsf2, $pop59 +; NOSIMD-NEXT: f32.add $push61=, $pop58, $pop60 +; NOSIMD-NEXT: call $push62=, __truncsfhf2, $pop61 +; NOSIMD-NEXT: i32.store16 2($0), $pop62 +; NOSIMD-NEXT: call $push63=, __truncsfhf2, $1 +; NOSIMD-NEXT: call $push64=, __extendhfsf2, $pop63 +; NOSIMD-NEXT: call $push65=, __truncsfhf2, $9 +; NOSIMD-NEXT: call $push66=, __extendhfsf2, $pop65 +; NOSIMD-NEXT: f32.mul $push67=, $pop64, $pop66 +; NOSIMD-NEXT: call $push68=, __truncsfhf2, $17 +; NOSIMD-NEXT: call $push69=, __extendhfsf2, $pop68 +; NOSIMD-NEXT: f32.add $push70=, $pop67, $pop69 +; NOSIMD-NEXT: call $push71=, __truncsfhf2, $pop70 +; NOSIMD-NEXT: i32.store16 0($0), $pop71 +; NOSIMD-NEXT: return %mul = fmul contract <8 x half> %b, %a %add = fadd contract <8 x half> %mul, %c ret <8 x half> %add } - define <4 x float> @fadd_fmul_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; RELAXED-LABEL: fadd_fmul_4xf32: ; RELAXED: .functype fadd_fmul_4xf32 (v128, v128, v128) -> (v128) @@ -76,16 +614,412 @@ define <4 x float> @fadd_fmul_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> ; STRICT-NEXT: f32x4.mul $push0=, $1, $0 ; STRICT-NEXT: f32x4.add $push1=, $pop0, $2 ; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fadd_fmul_4xf32: +; NOFP16: .functype fadd_fmul_4xf32 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32x4.mul $push0=, $1, $0 +; NOFP16-NEXT: f32x4.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fadd_fmul_4xf32: +; NOSIMD: .functype fadd_fmul_4xf32 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $8, $4 +; NOSIMD-NEXT: f32.add 
$push1=, $pop0, $12 +; NOSIMD-NEXT: f32.store 12($0), $pop1 +; NOSIMD-NEXT: f32.mul $push2=, $7, $3 +; NOSIMD-NEXT: f32.add $push3=, $pop2, $11 +; NOSIMD-NEXT: f32.store 8($0), $pop3 +; NOSIMD-NEXT: f32.mul $push4=, $6, $2 +; NOSIMD-NEXT: f32.add $push5=, $pop4, $10 +; NOSIMD-NEXT: f32.store 4($0), $pop5 +; NOSIMD-NEXT: f32.mul $push6=, $5, $1 +; NOSIMD-NEXT: f32.add $push7=, $pop6, $9 +; NOSIMD-NEXT: f32.store 0($0), $pop7 +; NOSIMD-NEXT: return %mul = fmul <4 x float> %b, %a %add = fadd contract <4 x float> %mul, %c ret <4 x float> %add } +define <8 x half> @fmuladd_contract_8xf16(<8 x half> %a, <8 x half> %b, <8 x half> %c) { +; RELAXED-LABEL: fmuladd_contract_8xf16: +; RELAXED: .functype fmuladd_contract_8xf16 (v128, v128, v128) -> (v128) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f16x8.madd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 +; +; STRICT-LABEL: fmuladd_contract_8xf16: +; STRICT: .functype fmuladd_contract_8xf16 (v128, v128, v128) -> (v128) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f16x8.madd $push0=, $0, $1, $2 +; STRICT-NEXT: return $pop0 +; +; NOFP16-LABEL: fmuladd_contract_8xf16: +; NOFP16: .functype fmuladd_contract_8xf16 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, __truncsfhf2, $16 +; NOFP16-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOFP16-NEXT: call $push2=, __truncsfhf2, $8 +; NOFP16-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOFP16-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOFP16-NEXT: call $push5=, __truncsfhf2, $24 +; NOFP16-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOFP16-NEXT: f32.add $push7=, $pop4, $pop6 +; NOFP16-NEXT: call $push8=, __truncsfhf2, $pop7 +; NOFP16-NEXT: i32.store16 14($0), $pop8 +; NOFP16-NEXT: call $push9=, __truncsfhf2, $15 +; NOFP16-NEXT: call $push10=, __extendhfsf2, $pop9 +; NOFP16-NEXT: call $push11=, __truncsfhf2, $7 +; NOFP16-NEXT: call $push12=, __extendhfsf2, $pop11 +; NOFP16-NEXT: f32.mul $push13=, $pop10, $pop12 +; NOFP16-NEXT: call $push14=, __truncsfhf2, $23 +; NOFP16-NEXT: call $push15=, __extendhfsf2, $pop14 +; NOFP16-NEXT: f32.add $push16=, $pop13, $pop15 +; NOFP16-NEXT: call $push17=, __truncsfhf2, $pop16 +; NOFP16-NEXT: i32.store16 12($0), $pop17 +; NOFP16-NEXT: call $push18=, __truncsfhf2, $14 +; NOFP16-NEXT: call $push19=, __extendhfsf2, $pop18 +; NOFP16-NEXT: call $push20=, __truncsfhf2, $6 +; NOFP16-NEXT: call $push21=, __extendhfsf2, $pop20 +; NOFP16-NEXT: f32.mul $push22=, $pop19, $pop21 +; NOFP16-NEXT: call $push23=, __truncsfhf2, $22 +; NOFP16-NEXT: call $push24=, __extendhfsf2, $pop23 +; NOFP16-NEXT: f32.add $push25=, $pop22, $pop24 +; NOFP16-NEXT: call $push26=, __truncsfhf2, $pop25 +; NOFP16-NEXT: i32.store16 10($0), $pop26 +; NOFP16-NEXT: call $push27=, __truncsfhf2, $13 +; NOFP16-NEXT: call $push28=, __extendhfsf2, $pop27 +; NOFP16-NEXT: call $push29=, __truncsfhf2, $5 +; NOFP16-NEXT: call $push30=, __extendhfsf2, $pop29 +; NOFP16-NEXT: f32.mul $push31=, $pop28, $pop30 +; NOFP16-NEXT: call $push32=, __truncsfhf2, $21 +; NOFP16-NEXT: call $push33=, __extendhfsf2, $pop32 +; NOFP16-NEXT: f32.add $push34=, $pop31, $pop33 +; NOFP16-NEXT: call $push35=, __truncsfhf2, $pop34 +; NOFP16-NEXT: i32.store16 8($0), $pop35 +; NOFP16-NEXT: call $push36=, __truncsfhf2, $12 +; NOFP16-NEXT: call $push37=, __extendhfsf2, $pop36 +; NOFP16-NEXT: call $push38=, __truncsfhf2, $4 +; NOFP16-NEXT: call $push39=, __extendhfsf2, $pop38 +; NOFP16-NEXT: f32.mul $push40=, $pop37, $pop39 +; 
NOFP16-NEXT: call $push41=, __truncsfhf2, $20 +; NOFP16-NEXT: call $push42=, __extendhfsf2, $pop41 +; NOFP16-NEXT: f32.add $push43=, $pop40, $pop42 +; NOFP16-NEXT: call $push44=, __truncsfhf2, $pop43 +; NOFP16-NEXT: i32.store16 6($0), $pop44 +; NOFP16-NEXT: call $push45=, __truncsfhf2, $11 +; NOFP16-NEXT: call $push46=, __extendhfsf2, $pop45 +; NOFP16-NEXT: call $push47=, __truncsfhf2, $3 +; NOFP16-NEXT: call $push48=, __extendhfsf2, $pop47 +; NOFP16-NEXT: f32.mul $push49=, $pop46, $pop48 +; NOFP16-NEXT: call $push50=, __truncsfhf2, $19 +; NOFP16-NEXT: call $push51=, __extendhfsf2, $pop50 +; NOFP16-NEXT: f32.add $push52=, $pop49, $pop51 +; NOFP16-NEXT: call $push53=, __truncsfhf2, $pop52 +; NOFP16-NEXT: i32.store16 4($0), $pop53 +; NOFP16-NEXT: call $push54=, __truncsfhf2, $10 +; NOFP16-NEXT: call $push55=, __extendhfsf2, $pop54 +; NOFP16-NEXT: call $push56=, __truncsfhf2, $2 +; NOFP16-NEXT: call $push57=, __extendhfsf2, $pop56 +; NOFP16-NEXT: f32.mul $push58=, $pop55, $pop57 +; NOFP16-NEXT: call $push59=, __truncsfhf2, $18 +; NOFP16-NEXT: call $push60=, __extendhfsf2, $pop59 +; NOFP16-NEXT: f32.add $push61=, $pop58, $pop60 +; NOFP16-NEXT: call $push62=, __truncsfhf2, $pop61 +; NOFP16-NEXT: i32.store16 2($0), $pop62 +; NOFP16-NEXT: call $push63=, __truncsfhf2, $9 +; NOFP16-NEXT: call $push64=, __extendhfsf2, $pop63 +; NOFP16-NEXT: call $push65=, __truncsfhf2, $1 +; NOFP16-NEXT: call $push66=, __extendhfsf2, $pop65 +; NOFP16-NEXT: f32.mul $push67=, $pop64, $pop66 +; NOFP16-NEXT: call $push68=, __truncsfhf2, $17 +; NOFP16-NEXT: call $push69=, __extendhfsf2, $pop68 +; NOFP16-NEXT: f32.add $push70=, $pop67, $pop69 +; NOFP16-NEXT: call $push71=, __truncsfhf2, $pop70 +; NOFP16-NEXT: i32.store16 0($0), $pop71 +; NOFP16-NEXT: return +; +; NOSIMD-LABEL: fmuladd_contract_8xf16: +; NOSIMD: .functype fmuladd_contract_8xf16 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, __truncsfhf2, $16 +; NOSIMD-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOSIMD-NEXT: call $push2=, __truncsfhf2, $8 +; NOSIMD-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOSIMD-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOSIMD-NEXT: call $push5=, __truncsfhf2, $24 +; NOSIMD-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOSIMD-NEXT: f32.add $push7=, $pop4, $pop6 +; NOSIMD-NEXT: call $push8=, __truncsfhf2, $pop7 +; NOSIMD-NEXT: i32.store16 14($0), $pop8 +; NOSIMD-NEXT: call $push9=, __truncsfhf2, $15 +; NOSIMD-NEXT: call $push10=, __extendhfsf2, $pop9 +; NOSIMD-NEXT: call $push11=, __truncsfhf2, $7 +; NOSIMD-NEXT: call $push12=, __extendhfsf2, $pop11 +; NOSIMD-NEXT: f32.mul $push13=, $pop10, $pop12 +; NOSIMD-NEXT: call $push14=, __truncsfhf2, $23 +; NOSIMD-NEXT: call $push15=, __extendhfsf2, $pop14 +; NOSIMD-NEXT: f32.add $push16=, $pop13, $pop15 +; NOSIMD-NEXT: call $push17=, __truncsfhf2, $pop16 +; NOSIMD-NEXT: i32.store16 12($0), $pop17 +; NOSIMD-NEXT: call $push18=, __truncsfhf2, $14 +; NOSIMD-NEXT: call $push19=, __extendhfsf2, $pop18 +; NOSIMD-NEXT: call $push20=, __truncsfhf2, $6 +; NOSIMD-NEXT: call $push21=, __extendhfsf2, $pop20 +; NOSIMD-NEXT: f32.mul $push22=, $pop19, $pop21 +; NOSIMD-NEXT: call $push23=, __truncsfhf2, $22 +; NOSIMD-NEXT: call $push24=, __extendhfsf2, $pop23 +; NOSIMD-NEXT: f32.add $push25=, $pop22, $pop24 +; NOSIMD-NEXT: call $push26=, __truncsfhf2, $pop25 +; NOSIMD-NEXT: i32.store16 10($0), $pop26 +; NOSIMD-NEXT: call $push27=, __truncsfhf2, $13 +; NOSIMD-NEXT: call $push28=, 
__extendhfsf2, $pop27 +; NOSIMD-NEXT: call $push29=, __truncsfhf2, $5 +; NOSIMD-NEXT: call $push30=, __extendhfsf2, $pop29 +; NOSIMD-NEXT: f32.mul $push31=, $pop28, $pop30 +; NOSIMD-NEXT: call $push32=, __truncsfhf2, $21 +; NOSIMD-NEXT: call $push33=, __extendhfsf2, $pop32 +; NOSIMD-NEXT: f32.add $push34=, $pop31, $pop33 +; NOSIMD-NEXT: call $push35=, __truncsfhf2, $pop34 +; NOSIMD-NEXT: i32.store16 8($0), $pop35 +; NOSIMD-NEXT: call $push36=, __truncsfhf2, $12 +; NOSIMD-NEXT: call $push37=, __extendhfsf2, $pop36 +; NOSIMD-NEXT: call $push38=, __truncsfhf2, $4 +; NOSIMD-NEXT: call $push39=, __extendhfsf2, $pop38 +; NOSIMD-NEXT: f32.mul $push40=, $pop37, $pop39 +; NOSIMD-NEXT: call $push41=, __truncsfhf2, $20 +; NOSIMD-NEXT: call $push42=, __extendhfsf2, $pop41 +; NOSIMD-NEXT: f32.add $push43=, $pop40, $pop42 +; NOSIMD-NEXT: call $push44=, __truncsfhf2, $pop43 +; NOSIMD-NEXT: i32.store16 6($0), $pop44 +; NOSIMD-NEXT: call $push45=, __truncsfhf2, $11 +; NOSIMD-NEXT: call $push46=, __extendhfsf2, $pop45 +; NOSIMD-NEXT: call $push47=, __truncsfhf2, $3 +; NOSIMD-NEXT: call $push48=, __extendhfsf2, $pop47 +; NOSIMD-NEXT: f32.mul $push49=, $pop46, $pop48 +; NOSIMD-NEXT: call $push50=, __truncsfhf2, $19 +; NOSIMD-NEXT: call $push51=, __extendhfsf2, $pop50 +; NOSIMD-NEXT: f32.add $push52=, $pop49, $pop51 +; NOSIMD-NEXT: call $push53=, __truncsfhf2, $pop52 +; NOSIMD-NEXT: i32.store16 4($0), $pop53 +; NOSIMD-NEXT: call $push54=, __truncsfhf2, $10 +; NOSIMD-NEXT: call $push55=, __extendhfsf2, $pop54 +; NOSIMD-NEXT: call $push56=, __truncsfhf2, $2 +; NOSIMD-NEXT: call $push57=, __extendhfsf2, $pop56 +; NOSIMD-NEXT: f32.mul $push58=, $pop55, $pop57 +; NOSIMD-NEXT: call $push59=, __truncsfhf2, $18 +; NOSIMD-NEXT: call $push60=, __extendhfsf2, $pop59 +; NOSIMD-NEXT: f32.add $push61=, $pop58, $pop60 +; NOSIMD-NEXT: call $push62=, __truncsfhf2, $pop61 +; NOSIMD-NEXT: i32.store16 2($0), $pop62 +; NOSIMD-NEXT: call $push63=, __truncsfhf2, $9 +; NOSIMD-NEXT: call $push64=, __extendhfsf2, $pop63 +; NOSIMD-NEXT: call $push65=, __truncsfhf2, $1 +; NOSIMD-NEXT: call $push66=, __extendhfsf2, $pop65 +; NOSIMD-NEXT: f32.mul $push67=, $pop64, $pop66 +; NOSIMD-NEXT: call $push68=, __truncsfhf2, $17 +; NOSIMD-NEXT: call $push69=, __extendhfsf2, $pop68 +; NOSIMD-NEXT: f32.add $push70=, $pop67, $pop69 +; NOSIMD-NEXT: call $push71=, __truncsfhf2, $pop70 +; NOSIMD-NEXT: i32.store16 0($0), $pop71 +; NOSIMD-NEXT: return + %fma = call contract <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %b, <8 x half> %c) + ret <8 x half> %fma +} + +define <8 x half> @fmuladd_8xf16(<8 x half> %a, <8 x half> %b, <8 x half> %c) { +; RELAXED-LABEL: fmuladd_8xf16: +; RELAXED: .functype fmuladd_8xf16 (v128, v128, v128) -> (v128) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f16x8.madd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 +; +; STRICT-LABEL: fmuladd_8xf16: +; STRICT: .functype fmuladd_8xf16 (v128, v128, v128) -> (v128) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f16x8.madd $push0=, $0, $1, $2 +; STRICT-NEXT: return $pop0 +; +; NOFP16-LABEL: fmuladd_8xf16: +; NOFP16: .functype fmuladd_8xf16 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, __truncsfhf2, $16 +; NOFP16-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOFP16-NEXT: call $push2=, __truncsfhf2, $8 +; NOFP16-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOFP16-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOFP16-NEXT: call $push5=, __truncsfhf2, $24 +; 
NOFP16-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOFP16-NEXT: f32.add $push7=, $pop4, $pop6 +; NOFP16-NEXT: call $push8=, __truncsfhf2, $pop7 +; NOFP16-NEXT: i32.store16 14($0), $pop8 +; NOFP16-NEXT: call $push9=, __truncsfhf2, $15 +; NOFP16-NEXT: call $push10=, __extendhfsf2, $pop9 +; NOFP16-NEXT: call $push11=, __truncsfhf2, $7 +; NOFP16-NEXT: call $push12=, __extendhfsf2, $pop11 +; NOFP16-NEXT: f32.mul $push13=, $pop10, $pop12 +; NOFP16-NEXT: call $push14=, __truncsfhf2, $23 +; NOFP16-NEXT: call $push15=, __extendhfsf2, $pop14 +; NOFP16-NEXT: f32.add $push16=, $pop13, $pop15 +; NOFP16-NEXT: call $push17=, __truncsfhf2, $pop16 +; NOFP16-NEXT: i32.store16 12($0), $pop17 +; NOFP16-NEXT: call $push18=, __truncsfhf2, $14 +; NOFP16-NEXT: call $push19=, __extendhfsf2, $pop18 +; NOFP16-NEXT: call $push20=, __truncsfhf2, $6 +; NOFP16-NEXT: call $push21=, __extendhfsf2, $pop20 +; NOFP16-NEXT: f32.mul $push22=, $pop19, $pop21 +; NOFP16-NEXT: call $push23=, __truncsfhf2, $22 +; NOFP16-NEXT: call $push24=, __extendhfsf2, $pop23 +; NOFP16-NEXT: f32.add $push25=, $pop22, $pop24 +; NOFP16-NEXT: call $push26=, __truncsfhf2, $pop25 +; NOFP16-NEXT: i32.store16 10($0), $pop26 +; NOFP16-NEXT: call $push27=, __truncsfhf2, $13 +; NOFP16-NEXT: call $push28=, __extendhfsf2, $pop27 +; NOFP16-NEXT: call $push29=, __truncsfhf2, $5 +; NOFP16-NEXT: call $push30=, __extendhfsf2, $pop29 +; NOFP16-NEXT: f32.mul $push31=, $pop28, $pop30 +; NOFP16-NEXT: call $push32=, __truncsfhf2, $21 +; NOFP16-NEXT: call $push33=, __extendhfsf2, $pop32 +; NOFP16-NEXT: f32.add $push34=, $pop31, $pop33 +; NOFP16-NEXT: call $push35=, __truncsfhf2, $pop34 +; NOFP16-NEXT: i32.store16 8($0), $pop35 +; NOFP16-NEXT: call $push36=, __truncsfhf2, $12 +; NOFP16-NEXT: call $push37=, __extendhfsf2, $pop36 +; NOFP16-NEXT: call $push38=, __truncsfhf2, $4 +; NOFP16-NEXT: call $push39=, __extendhfsf2, $pop38 +; NOFP16-NEXT: f32.mul $push40=, $pop37, $pop39 +; NOFP16-NEXT: call $push41=, __truncsfhf2, $20 +; NOFP16-NEXT: call $push42=, __extendhfsf2, $pop41 +; NOFP16-NEXT: f32.add $push43=, $pop40, $pop42 +; NOFP16-NEXT: call $push44=, __truncsfhf2, $pop43 +; NOFP16-NEXT: i32.store16 6($0), $pop44 +; NOFP16-NEXT: call $push45=, __truncsfhf2, $11 +; NOFP16-NEXT: call $push46=, __extendhfsf2, $pop45 +; NOFP16-NEXT: call $push47=, __truncsfhf2, $3 +; NOFP16-NEXT: call $push48=, __extendhfsf2, $pop47 +; NOFP16-NEXT: f32.mul $push49=, $pop46, $pop48 +; NOFP16-NEXT: call $push50=, __truncsfhf2, $19 +; NOFP16-NEXT: call $push51=, __extendhfsf2, $pop50 +; NOFP16-NEXT: f32.add $push52=, $pop49, $pop51 +; NOFP16-NEXT: call $push53=, __truncsfhf2, $pop52 +; NOFP16-NEXT: i32.store16 4($0), $pop53 +; NOFP16-NEXT: call $push54=, __truncsfhf2, $10 +; NOFP16-NEXT: call $push55=, __extendhfsf2, $pop54 +; NOFP16-NEXT: call $push56=, __truncsfhf2, $2 +; NOFP16-NEXT: call $push57=, __extendhfsf2, $pop56 +; NOFP16-NEXT: f32.mul $push58=, $pop55, $pop57 +; NOFP16-NEXT: call $push59=, __truncsfhf2, $18 +; NOFP16-NEXT: call $push60=, __extendhfsf2, $pop59 +; NOFP16-NEXT: f32.add $push61=, $pop58, $pop60 +; NOFP16-NEXT: call $push62=, __truncsfhf2, $pop61 +; NOFP16-NEXT: i32.store16 2($0), $pop62 +; NOFP16-NEXT: call $push63=, __truncsfhf2, $9 +; NOFP16-NEXT: call $push64=, __extendhfsf2, $pop63 +; NOFP16-NEXT: call $push65=, __truncsfhf2, $1 +; NOFP16-NEXT: call $push66=, __extendhfsf2, $pop65 +; NOFP16-NEXT: f32.mul $push67=, $pop64, $pop66 +; NOFP16-NEXT: call $push68=, __truncsfhf2, $17 +; NOFP16-NEXT: call $push69=, __extendhfsf2, $pop68 +; NOFP16-NEXT: f32.add $push70=, 
$pop67, $pop69 +; NOFP16-NEXT: call $push71=, __truncsfhf2, $pop70 +; NOFP16-NEXT: i32.store16 0($0), $pop71 +; NOFP16-NEXT: return +; +; NOSIMD-LABEL: fmuladd_8xf16: +; NOSIMD: .functype fmuladd_8xf16 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, __truncsfhf2, $16 +; NOSIMD-NEXT: call $push1=, __extendhfsf2, $pop0 +; NOSIMD-NEXT: call $push2=, __truncsfhf2, $8 +; NOSIMD-NEXT: call $push3=, __extendhfsf2, $pop2 +; NOSIMD-NEXT: f32.mul $push4=, $pop1, $pop3 +; NOSIMD-NEXT: call $push5=, __truncsfhf2, $24 +; NOSIMD-NEXT: call $push6=, __extendhfsf2, $pop5 +; NOSIMD-NEXT: f32.add $push7=, $pop4, $pop6 +; NOSIMD-NEXT: call $push8=, __truncsfhf2, $pop7 +; NOSIMD-NEXT: i32.store16 14($0), $pop8 +; NOSIMD-NEXT: call $push9=, __truncsfhf2, $15 +; NOSIMD-NEXT: call $push10=, __extendhfsf2, $pop9 +; NOSIMD-NEXT: call $push11=, __truncsfhf2, $7 +; NOSIMD-NEXT: call $push12=, __extendhfsf2, $pop11 +; NOSIMD-NEXT: f32.mul $push13=, $pop10, $pop12 +; NOSIMD-NEXT: call $push14=, __truncsfhf2, $23 +; NOSIMD-NEXT: call $push15=, __extendhfsf2, $pop14 +; NOSIMD-NEXT: f32.add $push16=, $pop13, $pop15 +; NOSIMD-NEXT: call $push17=, __truncsfhf2, $pop16 +; NOSIMD-NEXT: i32.store16 12($0), $pop17 +; NOSIMD-NEXT: call $push18=, __truncsfhf2, $14 +; NOSIMD-NEXT: call $push19=, __extendhfsf2, $pop18 +; NOSIMD-NEXT: call $push20=, __truncsfhf2, $6 +; NOSIMD-NEXT: call $push21=, __extendhfsf2, $pop20 +; NOSIMD-NEXT: f32.mul $push22=, $pop19, $pop21 +; NOSIMD-NEXT: call $push23=, __truncsfhf2, $22 +; NOSIMD-NEXT: call $push24=, __extendhfsf2, $pop23 +; NOSIMD-NEXT: f32.add $push25=, $pop22, $pop24 +; NOSIMD-NEXT: call $push26=, __truncsfhf2, $pop25 +; NOSIMD-NEXT: i32.store16 10($0), $pop26 +; NOSIMD-NEXT: call $push27=, __truncsfhf2, $13 +; NOSIMD-NEXT: call $push28=, __extendhfsf2, $pop27 +; NOSIMD-NEXT: call $push29=, __truncsfhf2, $5 +; NOSIMD-NEXT: call $push30=, __extendhfsf2, $pop29 +; NOSIMD-NEXT: f32.mul $push31=, $pop28, $pop30 +; NOSIMD-NEXT: call $push32=, __truncsfhf2, $21 +; NOSIMD-NEXT: call $push33=, __extendhfsf2, $pop32 +; NOSIMD-NEXT: f32.add $push34=, $pop31, $pop33 +; NOSIMD-NEXT: call $push35=, __truncsfhf2, $pop34 +; NOSIMD-NEXT: i32.store16 8($0), $pop35 +; NOSIMD-NEXT: call $push36=, __truncsfhf2, $12 +; NOSIMD-NEXT: call $push37=, __extendhfsf2, $pop36 +; NOSIMD-NEXT: call $push38=, __truncsfhf2, $4 +; NOSIMD-NEXT: call $push39=, __extendhfsf2, $pop38 +; NOSIMD-NEXT: f32.mul $push40=, $pop37, $pop39 +; NOSIMD-NEXT: call $push41=, __truncsfhf2, $20 +; NOSIMD-NEXT: call $push42=, __extendhfsf2, $pop41 +; NOSIMD-NEXT: f32.add $push43=, $pop40, $pop42 +; NOSIMD-NEXT: call $push44=, __truncsfhf2, $pop43 +; NOSIMD-NEXT: i32.store16 6($0), $pop44 +; NOSIMD-NEXT: call $push45=, __truncsfhf2, $11 +; NOSIMD-NEXT: call $push46=, __extendhfsf2, $pop45 +; NOSIMD-NEXT: call $push47=, __truncsfhf2, $3 +; NOSIMD-NEXT: call $push48=, __extendhfsf2, $pop47 +; NOSIMD-NEXT: f32.mul $push49=, $pop46, $pop48 +; NOSIMD-NEXT: call $push50=, __truncsfhf2, $19 +; NOSIMD-NEXT: call $push51=, __extendhfsf2, $pop50 +; NOSIMD-NEXT: f32.add $push52=, $pop49, $pop51 +; NOSIMD-NEXT: call $push53=, __truncsfhf2, $pop52 +; NOSIMD-NEXT: i32.store16 4($0), $pop53 +; NOSIMD-NEXT: call $push54=, __truncsfhf2, $10 +; NOSIMD-NEXT: call $push55=, __extendhfsf2, $pop54 +; NOSIMD-NEXT: call $push56=, __truncsfhf2, $2 +; NOSIMD-NEXT: call $push57=, __extendhfsf2, $pop56 +; NOSIMD-NEXT: f32.mul 
$push58=, $pop55, $pop57 +; NOSIMD-NEXT: call $push59=, __truncsfhf2, $18 +; NOSIMD-NEXT: call $push60=, __extendhfsf2, $pop59 +; NOSIMD-NEXT: f32.add $push61=, $pop58, $pop60 +; NOSIMD-NEXT: call $push62=, __truncsfhf2, $pop61 +; NOSIMD-NEXT: i32.store16 2($0), $pop62 +; NOSIMD-NEXT: call $push63=, __truncsfhf2, $9 +; NOSIMD-NEXT: call $push64=, __extendhfsf2, $pop63 +; NOSIMD-NEXT: call $push65=, __truncsfhf2, $1 +; NOSIMD-NEXT: call $push66=, __extendhfsf2, $pop65 +; NOSIMD-NEXT: f32.mul $push67=, $pop64, $pop66 +; NOSIMD-NEXT: call $push68=, __truncsfhf2, $17 +; NOSIMD-NEXT: call $push69=, __extendhfsf2, $pop68 +; NOSIMD-NEXT: f32.add $push70=, $pop67, $pop69 +; NOSIMD-NEXT: call $push71=, __truncsfhf2, $pop70 +; NOSIMD-NEXT: i32.store16 0($0), $pop71 +; NOSIMD-NEXT: return + %fma = call <8 x half> @llvm.fmuladd(<8 x half> %a, <8 x half> %b, <8 x half> %c) + ret <8 x half> %fma +} + define <4 x float> @fmuladd_contract_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; RELAXED-LABEL: fmuladd_contract_4xf32: ; RELAXED: .functype fmuladd_contract_4xf32 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f32x4.relaxed_madd $push0=, $2, $0, $1 +; RELAXED-NEXT: f32x4.relaxed_madd $push0=, $0, $1, $2 ; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fmuladd_contract_4xf32: @@ -94,18 +1028,40 @@ define <4 x float> @fmuladd_contract_4xf32(<4 x float> %a, <4 x float> %b, <4 x ; STRICT-NEXT: f32x4.mul $push0=, $0, $1 ; STRICT-NEXT: f32x4.add $push1=, $pop0, $2 ; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_contract_4xf32: +; NOFP16: .functype fmuladd_contract_4xf32 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32x4.mul $push0=, $0, $1 +; NOFP16-NEXT: f32x4.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_contract_4xf32: +; NOSIMD: .functype fmuladd_contract_4xf32 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $4, $8 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $12 +; NOSIMD-NEXT: f32.store 12($0), $pop1 +; NOSIMD-NEXT: f32.mul $push2=, $3, $7 +; NOSIMD-NEXT: f32.add $push3=, $pop2, $11 +; NOSIMD-NEXT: f32.store 8($0), $pop3 +; NOSIMD-NEXT: f32.mul $push4=, $2, $6 +; NOSIMD-NEXT: f32.add $push5=, $pop4, $10 +; NOSIMD-NEXT: f32.store 4($0), $pop5 +; NOSIMD-NEXT: f32.mul $push6=, $1, $5 +; NOSIMD-NEXT: f32.add $push7=, $pop6, $9 +; NOSIMD-NEXT: f32.store 0($0), $pop7 +; NOSIMD-NEXT: return %fma = call contract <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %c) ret <4 x float> %fma } -; TODO: This should also have relaxed_madd in RELAXED case define <4 x float> @fmuladd_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; RELAXED-LABEL: fmuladd_4xf32: ; RELAXED: .functype fmuladd_4xf32 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f32x4.mul $push0=, $0, $1 -; RELAXED-NEXT: f32x4.add $push1=, $pop0, $2 -; RELAXED-NEXT: return $pop1 +; RELAXED-NEXT: f32x4.relaxed_madd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fmuladd_4xf32: ; STRICT: .functype fmuladd_4xf32 (v128, v128, v128) -> (v128) @@ -113,10 +1069,170 @@ define <4 x float> @fmuladd_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c ; STRICT-NEXT: f32x4.mul $push0=, $0, $1 ; STRICT-NEXT: f32x4.add $push1=, $pop0, $2 ; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_4xf32: +; NOFP16: .functype fmuladd_4xf32 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32x4.mul $push0=, $0, 
$1 +; NOFP16-NEXT: f32x4.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_4xf32: +; NOSIMD: .functype fmuladd_4xf32 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $4, $8 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $12 +; NOSIMD-NEXT: f32.store 12($0), $pop1 +; NOSIMD-NEXT: f32.mul $push2=, $3, $7 +; NOSIMD-NEXT: f32.add $push3=, $pop2, $11 +; NOSIMD-NEXT: f32.store 8($0), $pop3 +; NOSIMD-NEXT: f32.mul $push4=, $2, $6 +; NOSIMD-NEXT: f32.add $push5=, $pop4, $10 +; NOSIMD-NEXT: f32.store 4($0), $pop5 +; NOSIMD-NEXT: f32.mul $push6=, $1, $5 +; NOSIMD-NEXT: f32.add $push7=, $pop6, $9 +; NOSIMD-NEXT: f32.store 0($0), $pop7 +; NOSIMD-NEXT: return %fma = call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %c) ret <4 x float> %fma } +define <8 x float> @fmuladd_8xf32(<8 x float> %a, <8 x float> %b, <8 x float> %c) { +; RELAXED-LABEL: fmuladd_8xf32: +; RELAXED: .functype fmuladd_8xf32 (i32, v128, v128, v128, v128, v128, v128) -> () +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f32x4.mul $push0=, $2, $4 +; RELAXED-NEXT: f32x4.add $push1=, $pop0, $6 +; RELAXED-NEXT: v128.store 16($0), $pop1 +; RELAXED-NEXT: f32x4.mul $push2=, $1, $3 +; RELAXED-NEXT: f32x4.add $push3=, $pop2, $5 +; RELAXED-NEXT: v128.store 0($0), $pop3 +; RELAXED-NEXT: return +; +; STRICT-LABEL: fmuladd_8xf32: +; STRICT: .functype fmuladd_8xf32 (i32, v128, v128, v128, v128, v128, v128) -> () +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f32x4.mul $push0=, $2, $4 +; STRICT-NEXT: f32x4.add $push1=, $pop0, $6 +; STRICT-NEXT: v128.store 16($0), $pop1 +; STRICT-NEXT: f32x4.mul $push2=, $1, $3 +; STRICT-NEXT: f32x4.add $push3=, $pop2, $5 +; STRICT-NEXT: v128.store 0($0), $pop3 +; STRICT-NEXT: return +; +; NOFP16-LABEL: fmuladd_8xf32: +; NOFP16: .functype fmuladd_8xf32 (i32, v128, v128, v128, v128, v128, v128) -> () +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32x4.mul $push0=, $2, $4 +; NOFP16-NEXT: f32x4.add $push1=, $pop0, $6 +; NOFP16-NEXT: v128.store 16($0), $pop1 +; NOFP16-NEXT: f32x4.mul $push2=, $1, $3 +; NOFP16-NEXT: f32x4.add $push3=, $pop2, $5 +; NOFP16-NEXT: v128.store 0($0), $pop3 +; NOFP16-NEXT: return +; +; NOSIMD-LABEL: fmuladd_8xf32: +; NOSIMD: .functype fmuladd_8xf32 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $8, $16 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $24 +; NOSIMD-NEXT: f32.store 28($0), $pop1 +; NOSIMD-NEXT: f32.mul $push2=, $7, $15 +; NOSIMD-NEXT: f32.add $push3=, $pop2, $23 +; NOSIMD-NEXT: f32.store 24($0), $pop3 +; NOSIMD-NEXT: f32.mul $push4=, $6, $14 +; NOSIMD-NEXT: f32.add $push5=, $pop4, $22 +; NOSIMD-NEXT: f32.store 20($0), $pop5 +; NOSIMD-NEXT: f32.mul $push6=, $5, $13 +; NOSIMD-NEXT: f32.add $push7=, $pop6, $21 +; NOSIMD-NEXT: f32.store 16($0), $pop7 +; NOSIMD-NEXT: f32.mul $push8=, $4, $12 +; NOSIMD-NEXT: f32.add $push9=, $pop8, $20 +; NOSIMD-NEXT: f32.store 12($0), $pop9 +; NOSIMD-NEXT: f32.mul $push10=, $3, $11 +; NOSIMD-NEXT: f32.add $push11=, $pop10, $19 +; NOSIMD-NEXT: f32.store 8($0), $pop11 +; NOSIMD-NEXT: f32.mul $push12=, $2, $10 +; NOSIMD-NEXT: f32.add $push13=, $pop12, $18 +; NOSIMD-NEXT: f32.store 4($0), $pop13 +; NOSIMD-NEXT: f32.mul $push14=, $1, $9 +; NOSIMD-NEXT: f32.add $push15=, $pop14, $17 +; NOSIMD-NEXT: f32.store 0($0), $pop15 +; NOSIMD-NEXT: return + %fma = call <8 x float> @llvm.fmuladd(<8 x float> %a, <8 x float> %b, 
<8 x float> %c) + ret <8 x float> %fma +} + +define <2 x double> @fmuladd_contract_2xf64(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; RELAXED-LABEL: fmuladd_contract_2xf64: +; RELAXED: .functype fmuladd_contract_2xf64 (v128, v128, v128) -> (v128) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f64x2.relaxed_madd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 +; +; STRICT-LABEL: fmuladd_contract_2xf64: +; STRICT: .functype fmuladd_contract_2xf64 (v128, v128, v128) -> (v128) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f64x2.mul $push0=, $0, $1 +; STRICT-NEXT: f64x2.add $push1=, $pop0, $2 +; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_contract_2xf64: +; NOFP16: .functype fmuladd_contract_2xf64 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f64x2.mul $push0=, $0, $1 +; NOFP16-NEXT: f64x2.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_contract_2xf64: +; NOSIMD: .functype fmuladd_contract_2xf64 (i32, f64, f64, f64, f64, f64, f64) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f64.mul $push0=, $2, $4 +; NOSIMD-NEXT: f64.add $push1=, $pop0, $6 +; NOSIMD-NEXT: f64.store 8($0), $pop1 +; NOSIMD-NEXT: f64.mul $push2=, $1, $3 +; NOSIMD-NEXT: f64.add $push3=, $pop2, $5 +; NOSIMD-NEXT: f64.store 0($0), $pop3 +; NOSIMD-NEXT: return + %fma = call contract <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %c) + ret <2 x double> %fma +} + +define <2 x double> @fmuladd_2xf64(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; RELAXED-LABEL: fmuladd_2xf64: +; RELAXED: .functype fmuladd_2xf64 (v128, v128, v128) -> (v128) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f64x2.relaxed_madd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 +; +; STRICT-LABEL: fmuladd_2xf64: +; STRICT: .functype fmuladd_2xf64 (v128, v128, v128) -> (v128) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f64x2.mul $push0=, $0, $1 +; STRICT-NEXT: f64x2.add $push1=, $pop0, $2 +; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fmuladd_2xf64: +; NOFP16: .functype fmuladd_2xf64 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f64x2.mul $push0=, $0, $1 +; NOFP16-NEXT: f64x2.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fmuladd_2xf64: +; NOSIMD: .functype fmuladd_2xf64 (i32, f64, f64, f64, f64, f64, f64) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f64.mul $push0=, $2, $4 +; NOSIMD-NEXT: f64.add $push1=, $pop0, $6 +; NOSIMD-NEXT: f64.store 8($0), $pop1 +; NOSIMD-NEXT: f64.mul $push2=, $1, $3 +; NOSIMD-NEXT: f64.add $push3=, $pop2, $5 +; NOSIMD-NEXT: f64.store 0($0), $pop3 +; NOSIMD-NEXT: return + %fma = call <2 x double> @llvm.fmuladd(<2 x double> %a, <2 x double> %b, <2 x double> %c) + ret <2 x double> %fma +} + define <4 x float> @fma_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; RELAXED-LABEL: fma_4xf32: ; RELAXED: .functype fma_4xf32 (v128, v128, v128) -> (v128) @@ -167,6 +1283,44 @@ define <4 x float> @fma_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; STRICT-NEXT: call $push18=, fmaf, $pop17, $pop16, $pop15 ; STRICT-NEXT: f32x4.replace_lane $push19=, $pop14, 3, $pop18 ; STRICT-NEXT: return $pop19 +; +; NOFP16-LABEL: fma_4xf32: +; NOFP16: .functype fma_4xf32 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32x4.extract_lane $push2=, $0, 0 +; NOFP16-NEXT: f32x4.extract_lane $push1=, $1, 0 +; NOFP16-NEXT: f32x4.extract_lane $push0=, $2, 0 +; NOFP16-NEXT: call $push3=, fmaf, $pop2, $pop1, $pop0 +; NOFP16-NEXT: f32x4.splat $push4=, $pop3 +; NOFP16-NEXT: 
f32x4.extract_lane $push7=, $0, 1 +; NOFP16-NEXT: f32x4.extract_lane $push6=, $1, 1 +; NOFP16-NEXT: f32x4.extract_lane $push5=, $2, 1 +; NOFP16-NEXT: call $push8=, fmaf, $pop7, $pop6, $pop5 +; NOFP16-NEXT: f32x4.replace_lane $push9=, $pop4, 1, $pop8 +; NOFP16-NEXT: f32x4.extract_lane $push12=, $0, 2 +; NOFP16-NEXT: f32x4.extract_lane $push11=, $1, 2 +; NOFP16-NEXT: f32x4.extract_lane $push10=, $2, 2 +; NOFP16-NEXT: call $push13=, fmaf, $pop12, $pop11, $pop10 +; NOFP16-NEXT: f32x4.replace_lane $push14=, $pop9, 2, $pop13 +; NOFP16-NEXT: f32x4.extract_lane $push17=, $0, 3 +; NOFP16-NEXT: f32x4.extract_lane $push16=, $1, 3 +; NOFP16-NEXT: f32x4.extract_lane $push15=, $2, 3 +; NOFP16-NEXT: call $push18=, fmaf, $pop17, $pop16, $pop15 +; NOFP16-NEXT: f32x4.replace_lane $push19=, $pop14, 3, $pop18 +; NOFP16-NEXT: return $pop19 +; +; NOSIMD-LABEL: fma_4xf32: +; NOSIMD: .functype fma_4xf32 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, fmaf, $4, $8, $12 +; NOSIMD-NEXT: f32.store 12($0), $pop0 +; NOSIMD-NEXT: call $push1=, fmaf, $3, $7, $11 +; NOSIMD-NEXT: f32.store 8($0), $pop1 +; NOSIMD-NEXT: call $push2=, fmaf, $2, $6, $10 +; NOSIMD-NEXT: f32.store 4($0), $pop2 +; NOSIMD-NEXT: call $push3=, fmaf, $1, $5, $9 +; NOSIMD-NEXT: f32.store 0($0), $pop3 +; NOSIMD-NEXT: return %fma = call <4 x float> @llvm.fma(<4 x float> %a, <4 x float> %b, <4 x float> %c) ret <4 x float> %fma } @@ -176,9 +1330,9 @@ define <8 x float> @fadd_fmul_contract_8xf32(<8 x float> %a, <8 x float> %b, <8 ; RELAXED-LABEL: fadd_fmul_contract_8xf32: ; RELAXED: .functype fadd_fmul_contract_8xf32 (i32, v128, v128, v128, v128, v128, v128) -> () ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f32x4.relaxed_madd $push0=, $6, $4, $2 +; RELAXED-NEXT: f32x4.relaxed_madd $push0=, $4, $2, $6 ; RELAXED-NEXT: v128.store 16($0), $pop0 -; RELAXED-NEXT: f32x4.relaxed_madd $push1=, $5, $3, $1 +; RELAXED-NEXT: f32x4.relaxed_madd $push1=, $3, $1, $5 ; RELAXED-NEXT: v128.store 0($0), $pop1 ; RELAXED-NEXT: return ; @@ -192,17 +1346,56 @@ define <8 x float> @fadd_fmul_contract_8xf32(<8 x float> %a, <8 x float> %b, <8 ; STRICT-NEXT: f32x4.add $push3=, $pop2, $5 ; STRICT-NEXT: v128.store 0($0), $pop3 ; STRICT-NEXT: return +; +; NOFP16-LABEL: fadd_fmul_contract_8xf32: +; NOFP16: .functype fadd_fmul_contract_8xf32 (i32, v128, v128, v128, v128, v128, v128) -> () +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f32x4.mul $push0=, $4, $2 +; NOFP16-NEXT: f32x4.add $push1=, $pop0, $6 +; NOFP16-NEXT: v128.store 16($0), $pop1 +; NOFP16-NEXT: f32x4.mul $push2=, $3, $1 +; NOFP16-NEXT: f32x4.add $push3=, $pop2, $5 +; NOFP16-NEXT: v128.store 0($0), $pop3 +; NOFP16-NEXT: return +; +; NOSIMD-LABEL: fadd_fmul_contract_8xf32: +; NOSIMD: .functype fadd_fmul_contract_8xf32 (i32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f32.mul $push0=, $16, $8 +; NOSIMD-NEXT: f32.add $push1=, $pop0, $24 +; NOSIMD-NEXT: f32.store 28($0), $pop1 +; NOSIMD-NEXT: f32.mul $push2=, $15, $7 +; NOSIMD-NEXT: f32.add $push3=, $pop2, $23 +; NOSIMD-NEXT: f32.store 24($0), $pop3 +; NOSIMD-NEXT: f32.mul $push4=, $14, $6 +; NOSIMD-NEXT: f32.add $push5=, $pop4, $22 +; NOSIMD-NEXT: f32.store 20($0), $pop5 +; NOSIMD-NEXT: f32.mul $push6=, $13, $5 +; NOSIMD-NEXT: f32.add $push7=, $pop6, $21 +; NOSIMD-NEXT: f32.store 16($0), $pop7 +; NOSIMD-NEXT: f32.mul $push8=, $12, $4 +; NOSIMD-NEXT: f32.add $push9=, $pop8, $20 +; 
NOSIMD-NEXT: f32.store 12($0), $pop9 +; NOSIMD-NEXT: f32.mul $push10=, $11, $3 +; NOSIMD-NEXT: f32.add $push11=, $pop10, $19 +; NOSIMD-NEXT: f32.store 8($0), $pop11 +; NOSIMD-NEXT: f32.mul $push12=, $10, $2 +; NOSIMD-NEXT: f32.add $push13=, $pop12, $18 +; NOSIMD-NEXT: f32.store 4($0), $pop13 +; NOSIMD-NEXT: f32.mul $push14=, $9, $1 +; NOSIMD-NEXT: f32.add $push15=, $pop14, $17 +; NOSIMD-NEXT: f32.store 0($0), $pop15 +; NOSIMD-NEXT: return %mul = fmul contract <8 x float> %b, %a %add = fadd contract <8 x float> %mul, %c ret <8 x float> %add } - define <2 x double> @fadd_fmul_contract_2xf64(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; RELAXED-LABEL: fadd_fmul_contract_2xf64: ; RELAXED: .functype fadd_fmul_contract_2xf64 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f64x2.relaxed_madd $push0=, $2, $1, $0 +; RELAXED-NEXT: f64x2.relaxed_madd $push0=, $1, $0, $2 ; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fadd_fmul_contract_2xf64: @@ -211,28 +1404,64 @@ define <2 x double> @fadd_fmul_contract_2xf64(<2 x double> %a, <2 x double> %b, ; STRICT-NEXT: f64x2.mul $push0=, $1, $0 ; STRICT-NEXT: f64x2.add $push1=, $pop0, $2 ; STRICT-NEXT: return $pop1 +; +; NOFP16-LABEL: fadd_fmul_contract_2xf64: +; NOFP16: .functype fadd_fmul_contract_2xf64 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f64x2.mul $push0=, $1, $0 +; NOFP16-NEXT: f64x2.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fadd_fmul_contract_2xf64: +; NOSIMD: .functype fadd_fmul_contract_2xf64 (i32, f64, f64, f64, f64, f64, f64) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f64.mul $push0=, $4, $2 +; NOSIMD-NEXT: f64.add $push1=, $pop0, $6 +; NOSIMD-NEXT: f64.store 8($0), $pop1 +; NOSIMD-NEXT: f64.mul $push2=, $3, $1 +; NOSIMD-NEXT: f64.add $push3=, $pop2, $5 +; NOSIMD-NEXT: f64.store 0($0), $pop3 +; NOSIMD-NEXT: return %mul = fmul contract <2 x double> %b, %a %add = fadd contract <2 x double> %mul, %c ret <2 x double> %add } -define float @fadd_fmul_contract_f32(float %a, float %b, float %c) { -; RELAXED-LABEL: fadd_fmul_contract_f32: -; RELAXED: .functype fadd_fmul_contract_f32 (f32, f32, f32) -> (f32) +define <2 x double> @fadd_fmul_2xf64(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; RELAXED-LABEL: fadd_fmul_2xf64: +; RELAXED: .functype fadd_fmul_2xf64 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f32.mul $push0=, $1, $0 -; RELAXED-NEXT: f32.add $push1=, $pop0, $2 +; RELAXED-NEXT: f64x2.mul $push0=, $1, $0 +; RELAXED-NEXT: f64x2.add $push1=, $pop0, $2 ; RELAXED-NEXT: return $pop1 ; -; STRICT-LABEL: fadd_fmul_contract_f32: -; STRICT: .functype fadd_fmul_contract_f32 (f32, f32, f32) -> (f32) +; STRICT-LABEL: fadd_fmul_2xf64: +; STRICT: .functype fadd_fmul_2xf64 (v128, v128, v128) -> (v128) ; STRICT-NEXT: # %bb.0: -; STRICT-NEXT: f32.mul $push0=, $1, $0 -; STRICT-NEXT: f32.add $push1=, $pop0, $2 +; STRICT-NEXT: f64x2.mul $push0=, $1, $0 +; STRICT-NEXT: f64x2.add $push1=, $pop0, $2 ; STRICT-NEXT: return $pop1 - %mul = fmul contract float %b, %a - %add = fadd contract float %mul, %c - ret float %add +; +; NOFP16-LABEL: fadd_fmul_2xf64: +; NOFP16: .functype fadd_fmul_2xf64 (v128, v128, v128) -> (v128) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: f64x2.mul $push0=, $1, $0 +; NOFP16-NEXT: f64x2.add $push1=, $pop0, $2 +; NOFP16-NEXT: return $pop1 +; +; NOSIMD-LABEL: fadd_fmul_2xf64: +; NOSIMD: .functype fadd_fmul_2xf64 (i32, f64, f64, f64, f64, f64, f64) -> () +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: f64.mul $push0=, $4, $2 +; 
NOSIMD-NEXT: f64.add $push1=, $pop0, $6 +; NOSIMD-NEXT: f64.store 8($0), $pop1 +; NOSIMD-NEXT: f64.mul $push2=, $3, $1 +; NOSIMD-NEXT: f64.add $push3=, $pop2, $5 +; NOSIMD-NEXT: f64.store 0($0), $pop3 +; NOSIMD-NEXT: return + %mul = fmul <2 x double> %b, %a + %add = fadd <2 x double> %mul, %c + ret <2 x double> %add } define float @fma_f32(float %a, float %b, float %c) { @@ -247,6 +1476,18 @@ define float @fma_f32(float %a, float %b, float %c) { ; STRICT-NEXT: # %bb.0: ; STRICT-NEXT: call $push0=, fmaf, $0, $1, $2 ; STRICT-NEXT: return $pop0 +; +; NOFP16-LABEL: fma_f32: +; NOFP16: .functype fma_f32 (f32, f32, f32) -> (f32) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, fmaf, $0, $1, $2 +; NOFP16-NEXT: return $pop0 +; +; NOSIMD-LABEL: fma_f32: +; NOSIMD: .functype fma_f32 (f32, f32, f32) -> (f32) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, fmaf, $0, $1, $2 +; NOSIMD-NEXT: return $pop0 %fma = call float @llvm.fma(float %a, float %b, float %c) ret float %fma } @@ -263,6 +1504,18 @@ define double @fma_f64(double %a, double %b, double %c) { ; STRICT-NEXT: # %bb.0: ; STRICT-NEXT: call $push0=, fma, $0, $1, $2 ; STRICT-NEXT: return $pop0 +; +; NOFP16-LABEL: fma_f64: +; NOFP16: .functype fma_f64 (f64, f64, f64) -> (f64) +; NOFP16-NEXT: # %bb.0: +; NOFP16-NEXT: call $push0=, fma, $0, $1, $2 +; NOFP16-NEXT: return $pop0 +; +; NOSIMD-LABEL: fma_f64: +; NOSIMD: .functype fma_f64 (f64, f64, f64) -> (f64) +; NOSIMD-NEXT: # %bb.0: +; NOSIMD-NEXT: call $push0=, fma, $0, $1, $2 +; NOSIMD-NEXT: return $pop0 %fma = call double @llvm.fma(double %a, double %b, double %c) ret double %fma } diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fnma.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fnma.ll index 6e2d860..b90c1da 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fnma.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fnma.ll @@ -27,7 +27,7 @@ define <4 x float> @fsub_fmul_contract_4xf32(<4 x float> %a, <4 x float> %b, <4 ; RELAXED-LABEL: fsub_fmul_contract_4xf32: ; RELAXED: .functype fsub_fmul_contract_4xf32 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f32x4.relaxed_nmadd $push0=, $2, $1, $0 +; RELAXED-NEXT: f32x4.relaxed_nmadd $push0=, $1, $0, $2 ; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fsub_fmul_contract_4xf32: @@ -46,15 +46,14 @@ define <8 x half> @fsub_fmul_contract_8xf16(<8 x half> %a, <8 x half> %b, <8 x h ; RELAXED-LABEL: fsub_fmul_contract_8xf16: ; RELAXED: .functype fsub_fmul_contract_8xf16 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f16x8.relaxed_nmadd $push0=, $2, $1, $0 +; RELAXED-NEXT: f16x8.nmadd $push0=, $1, $0, $2 ; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fsub_fmul_contract_8xf16: ; STRICT: .functype fsub_fmul_contract_8xf16 (v128, v128, v128) -> (v128) ; STRICT-NEXT: # %bb.0: -; STRICT-NEXT: f16x8.mul $push0=, $1, $0 -; STRICT-NEXT: f16x8.sub $push1=, $2, $pop0 -; STRICT-NEXT: return $pop1 +; STRICT-NEXT: f16x8.nmadd $push0=, $1, $0, $2 +; STRICT-NEXT: return $pop0 %mul = fmul contract <8 x half> %b, %a %sub = fsub contract <8 x half> %c, %mul ret <8 x half> %sub @@ -84,9 +83,9 @@ define <8 x float> @fsub_fmul_contract_8xf32(<8 x float> %a, <8 x float> %b, <8 ; RELAXED-LABEL: fsub_fmul_contract_8xf32: ; RELAXED: .functype fsub_fmul_contract_8xf32 (i32, v128, v128, v128, v128, v128, v128) -> () ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f32x4.relaxed_nmadd $push0=, $6, $4, $2 +; RELAXED-NEXT: f32x4.relaxed_nmadd $push0=, $4, $2, $6 ; RELAXED-NEXT: v128.store 16($0), $pop0 -; RELAXED-NEXT: 
f32x4.relaxed_nmadd $push1=, $5, $3, $1 +; RELAXED-NEXT: f32x4.relaxed_nmadd $push1=, $3, $1, $5 ; RELAXED-NEXT: v128.store 0($0), $pop1 ; RELAXED-NEXT: return ; @@ -110,7 +109,7 @@ define <2 x double> @fsub_fmul_contract_2xf64(<2 x double> %a, <2 x double> %b, ; RELAXED-LABEL: fsub_fmul_contract_2xf64: ; RELAXED: .functype fsub_fmul_contract_2xf64 (v128, v128, v128) -> (v128) ; RELAXED-NEXT: # %bb.0: -; RELAXED-NEXT: f64x2.relaxed_nmadd $push0=, $2, $1, $0 +; RELAXED-NEXT: f64x2.relaxed_nmadd $push0=, $1, $0, $2 ; RELAXED-NEXT: return $pop0 ; ; STRICT-LABEL: fsub_fmul_contract_2xf64: @@ -143,3 +142,55 @@ define float @fsub_fmul_contract_f32(float %a, float %b, float %c) { ret float %sub } +define <8 x half> @fmuladd_8xf16(<8 x half> %a, <8 x half> %b, <8 x half> %c) { +; RELAXED-LABEL: fmuladd_8xf16: +; RELAXED: .functype fmuladd_8xf16 (v128, v128, v128) -> (v128) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f16x8.nmadd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 +; +; STRICT-LABEL: fmuladd_8xf16: +; STRICT: .functype fmuladd_8xf16 (v128, v128, v128) -> (v128) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f16x8.nmadd $push0=, $0, $1, $2 +; STRICT-NEXT: return $pop0 + %fneg = fneg <8 x half> %a + %fma = call <8 x half> @llvm.fmuladd(<8 x half> %fneg, <8 x half> %b, <8 x half> %c) + ret <8 x half> %fma +} + +define <4 x float> @fmuladd_4xf32(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; RELAXED-LABEL: fmuladd_4xf32: +; RELAXED: .functype fmuladd_4xf32 (v128, v128, v128) -> (v128) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f32x4.relaxed_nmadd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 +; +; STRICT-LABEL: fmuladd_4xf32: +; STRICT: .functype fmuladd_4xf32 (v128, v128, v128) -> (v128) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f32x4.mul $push0=, $0, $1 +; STRICT-NEXT: f32x4.sub $push1=, $2, $pop0 +; STRICT-NEXT: return $pop1 + %fneg = fneg <4 x float> %a + %fma = call <4 x float> @llvm.fmuladd(<4 x float> %fneg, <4 x float> %b, <4 x float> %c) + ret <4 x float> %fma +} + +define <2 x double> @fmuladd_2xf64(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; RELAXED-LABEL: fmuladd_2xf64: +; RELAXED: .functype fmuladd_2xf64 (v128, v128, v128) -> (v128) +; RELAXED-NEXT: # %bb.0: +; RELAXED-NEXT: f64x2.relaxed_nmadd $push0=, $0, $1, $2 +; RELAXED-NEXT: return $pop0 +; +; STRICT-LABEL: fmuladd_2xf64: +; STRICT: .functype fmuladd_2xf64 (v128, v128, v128) -> (v128) +; STRICT-NEXT: # %bb.0: +; STRICT-NEXT: f64x2.mul $push0=, $0, $1 +; STRICT-NEXT: f64x2.sub $push1=, $2, $pop0 +; STRICT-NEXT: return $pop1 + %fneg = fneg <2 x double> %a + %fma = call <2 x double> @llvm.fmuladd(<2 x double> %fneg, <2 x double> %b, <2 x double> %c) + ret <2 x double> %fma +} diff --git a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll index 28b4541..7bdc4e1 100644 --- a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll +++ b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll @@ -44,7 +44,7 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) { ; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi ; CHECK-NEXT: callq __ubyte_convert_to_ctype ; CHECK-NEXT: testl %eax, %eax -; CHECK-NEXT: js LBB0_6 +; CHECK-NEXT: js LBB0_4 ; CHECK-NEXT: ## %bb.1: ## %cond_next.i ; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi ; CHECK-NEXT: movq %rbx, %rdi @@ -53,84 +53,81 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) { ; CHECK-NEXT: sarl $31, %ecx ; CHECK-NEXT: andl %eax, %ecx ; CHECK-NEXT: cmpl $-2, %ecx -; CHECK-NEXT: je LBB0_10 +; CHECK-NEXT: je LBB0_8 ; CHECK-NEXT: ## %bb.2: ## 
%cond_next.i ; CHECK-NEXT: cmpl $-1, %ecx -; CHECK-NEXT: jne LBB0_3 -; CHECK-NEXT: LBB0_8: ## %bb4 +; CHECK-NEXT: jne LBB0_6 +; CHECK-NEXT: LBB0_3: ## %bb4 ; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %rax ; CHECK-NEXT: movq (%rax), %rax ; CHECK-NEXT: movq 16(%rax), %rax -; CHECK-NEXT: jmp LBB0_9 -; CHECK-NEXT: LBB0_6: ## %_ubyte_convert2_to_ctypes.exit +; CHECK-NEXT: jmp LBB0_10 +; CHECK-NEXT: LBB0_4: ## %_ubyte_convert2_to_ctypes.exit ; CHECK-NEXT: cmpl $-2, %eax -; CHECK-NEXT: je LBB0_10 -; CHECK-NEXT: ## %bb.7: ## %_ubyte_convert2_to_ctypes.exit -; CHECK-NEXT: cmpl $-1, %eax ; CHECK-NEXT: je LBB0_8 -; CHECK-NEXT: LBB0_3: ## %bb35 +; CHECK-NEXT: ## %bb.5: ## %_ubyte_convert2_to_ctypes.exit +; CHECK-NEXT: cmpl $-1, %eax +; CHECK-NEXT: je LBB0_3 +; CHECK-NEXT: LBB0_6: ## %bb35 ; CHECK-NEXT: movq _PyUFunc_API@GOTPCREL(%rip), %r14 ; CHECK-NEXT: movq (%r14), %rax ; CHECK-NEXT: callq *216(%rax) ; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %edx ; CHECK-NEXT: testb %dl, %dl -; CHECK-NEXT: je LBB0_4 -; CHECK-NEXT: ## %bb.12: ## %cond_false.i -; CHECK-NEXT: setne %dil +; CHECK-NEXT: je LBB0_11 +; CHECK-NEXT: ## %bb.7: ## %cond_false.i ; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %esi ; CHECK-NEXT: movzbl %sil, %ecx ; CHECK-NEXT: movl %ecx, %eax ; CHECK-NEXT: divb %dl ; CHECK-NEXT: movl %eax, %r15d ; CHECK-NEXT: testb %cl, %cl -; CHECK-NEXT: setne %al -; CHECK-NEXT: testb %dil, %al -; CHECK-NEXT: jne LBB0_5 -; CHECK-NEXT: LBB0_13: ## %cond_true.i200 -; CHECK-NEXT: testb %dl, %dl -; CHECK-NEXT: jne LBB0_15 -; CHECK-NEXT: ## %bb.14: ## %cond_true14.i -; CHECK-NEXT: movl $4, %edi -; CHECK-NEXT: callq _feraiseexcept -; CHECK-NEXT: LBB0_15: ## %ubyte_ctype_remainder.exit -; CHECK-NEXT: xorl %ebx, %ebx -; CHECK-NEXT: jmp LBB0_16 -; CHECK-NEXT: LBB0_10: ## %bb17 +; CHECK-NEXT: jne LBB0_12 +; CHECK-NEXT: jmp LBB0_14 +; CHECK-NEXT: LBB0_8: ## %bb17 ; CHECK-NEXT: callq _PyErr_Occurred ; CHECK-NEXT: testq %rax, %rax -; CHECK-NEXT: jne LBB0_23 -; CHECK-NEXT: ## %bb.11: ## %cond_next +; CHECK-NEXT: jne LBB0_27 +; CHECK-NEXT: ## %bb.9: ## %cond_next ; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %rax ; CHECK-NEXT: movq (%rax), %rax ; CHECK-NEXT: movq 80(%rax), %rax -; CHECK-NEXT: LBB0_9: ## %bb4 +; CHECK-NEXT: LBB0_10: ## %bb4 ; CHECK-NEXT: movq 96(%rax), %rax ; CHECK-NEXT: movq %r14, %rdi ; CHECK-NEXT: movq %rbx, %rsi ; CHECK-NEXT: callq *40(%rax) -; CHECK-NEXT: jmp LBB0_24 -; CHECK-NEXT: LBB0_4: ## %cond_true.i +; CHECK-NEXT: jmp LBB0_28 +; CHECK-NEXT: LBB0_11: ## %cond_true.i ; CHECK-NEXT: movl $4, %edi ; CHECK-NEXT: callq _feraiseexcept ; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %edx ; CHECK-NEXT: movzbl {{[0-9]+}}(%rsp), %esi +; CHECK-NEXT: xorl %r15d, %r15d ; CHECK-NEXT: testb %sil, %sil -; CHECK-NEXT: sete %al +; CHECK-NEXT: je LBB0_14 +; CHECK-NEXT: LBB0_12: ## %cond_false.i ; CHECK-NEXT: testb %dl, %dl -; CHECK-NEXT: sete %cl -; CHECK-NEXT: xorl %r15d, %r15d -; CHECK-NEXT: orb %al, %cl -; CHECK-NEXT: jne LBB0_13 -; CHECK-NEXT: LBB0_5: ## %cond_next17.i +; CHECK-NEXT: je LBB0_14 +; CHECK-NEXT: ## %bb.13: ## %cond_next17.i ; CHECK-NEXT: movzbl %sil, %eax ; CHECK-NEXT: divb %dl ; CHECK-NEXT: movzbl %ah, %ebx -; CHECK-NEXT: LBB0_16: ## %ubyte_ctype_remainder.exit +; CHECK-NEXT: jmp LBB0_18 +; CHECK-NEXT: LBB0_14: ## %cond_true.i200 +; CHECK-NEXT: testb %dl, %dl +; CHECK-NEXT: jne LBB0_17 +; CHECK-NEXT: ## %bb.16: ## %cond_true14.i +; CHECK-NEXT: movl $4, %edi +; CHECK-NEXT: callq _feraiseexcept +; CHECK-NEXT: LBB0_17: ## %ubyte_ctype_remainder.exit +; CHECK-NEXT: xorl %ebx, %ebx +; CHECK-NEXT: LBB0_18: ## 
%ubyte_ctype_remainder.exit ; CHECK-NEXT: movq (%r14), %rax ; CHECK-NEXT: callq *224(%rax) ; CHECK-NEXT: testl %eax, %eax -; CHECK-NEXT: je LBB0_19 -; CHECK-NEXT: ## %bb.17: ## %cond_true61 +; CHECK-NEXT: je LBB0_21 +; CHECK-NEXT: ## %bb.19: ## %cond_true61 ; CHECK-NEXT: movl %eax, %ebp ; CHECK-NEXT: movq (%r14), %rax ; CHECK-NEXT: movq _.str5@GOTPCREL(%rip), %rdi @@ -139,8 +136,8 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) { ; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx ; CHECK-NEXT: callq *200(%rax) ; CHECK-NEXT: testl %eax, %eax -; CHECK-NEXT: js LBB0_23 -; CHECK-NEXT: ## %bb.18: ## %cond_next73 +; CHECK-NEXT: js LBB0_27 +; CHECK-NEXT: ## %bb.20: ## %cond_next73 ; CHECK-NEXT: movl $1, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq (%r14), %rax ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rsi @@ -149,13 +146,13 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) { ; CHECK-NEXT: movl %ebp, %edx ; CHECK-NEXT: callq *232(%rax) ; CHECK-NEXT: testl %eax, %eax -; CHECK-NEXT: jne LBB0_23 -; CHECK-NEXT: LBB0_19: ## %cond_next89 +; CHECK-NEXT: jne LBB0_27 +; CHECK-NEXT: LBB0_21: ## %cond_next89 ; CHECK-NEXT: movl $2, %edi ; CHECK-NEXT: callq _PyTuple_New ; CHECK-NEXT: testq %rax, %rax -; CHECK-NEXT: je LBB0_23 -; CHECK-NEXT: ## %bb.20: ## %cond_next97 +; CHECK-NEXT: je LBB0_27 +; CHECK-NEXT: ## %bb.22: ## %cond_next97 ; CHECK-NEXT: movq %rax, %r14 ; CHECK-NEXT: movq _PyArray_API@GOTPCREL(%rip), %r12 ; CHECK-NEXT: movq (%r12), %rax @@ -163,8 +160,8 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) { ; CHECK-NEXT: xorl %esi, %esi ; CHECK-NEXT: callq *304(%rdi) ; CHECK-NEXT: testq %rax, %rax -; CHECK-NEXT: je LBB0_21 -; CHECK-NEXT: ## %bb.25: ## %cond_next135 +; CHECK-NEXT: je LBB0_25 +; CHECK-NEXT: ## %bb.23: ## %cond_next135 ; CHECK-NEXT: movb %r15b, 16(%rax) ; CHECK-NEXT: movq %rax, 24(%r14) ; CHECK-NEXT: movq (%r12), %rax @@ -172,22 +169,22 @@ define ptr @ubyte_divmod(ptr %a, ptr %b) { ; CHECK-NEXT: xorl %esi, %esi ; CHECK-NEXT: callq *304(%rdi) ; CHECK-NEXT: testq %rax, %rax -; CHECK-NEXT: je LBB0_21 -; CHECK-NEXT: ## %bb.26: ## %cond_next182 +; CHECK-NEXT: je LBB0_25 +; CHECK-NEXT: ## %bb.24: ## %cond_next182 ; CHECK-NEXT: movb %bl, 16(%rax) ; CHECK-NEXT: movq %rax, 32(%r14) ; CHECK-NEXT: movq %r14, %rax -; CHECK-NEXT: jmp LBB0_24 -; CHECK-NEXT: LBB0_21: ## %cond_true113 +; CHECK-NEXT: jmp LBB0_28 +; CHECK-NEXT: LBB0_25: ## %cond_true113 ; CHECK-NEXT: decq (%r14) -; CHECK-NEXT: jne LBB0_23 -; CHECK-NEXT: ## %bb.22: ## %cond_true126 +; CHECK-NEXT: jne LBB0_27 +; CHECK-NEXT: ## %bb.26: ## %cond_true126 ; CHECK-NEXT: movq 8(%r14), %rax ; CHECK-NEXT: movq %r14, %rdi ; CHECK-NEXT: callq *48(%rax) -; CHECK-NEXT: LBB0_23: ## %UnifiedReturnBlock +; CHECK-NEXT: LBB0_27: ## %UnifiedReturnBlock ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: LBB0_24: ## %UnifiedReturnBlock +; CHECK-NEXT: LBB0_28: ## %UnifiedReturnBlock ; CHECK-NEXT: addq $32, %rsp ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: popq %r12 diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll index 7bde1b7..7cdfd51 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll @@ -7,12 +7,15 @@ define i128 @test_add_i128(i128 %arg1, i128 %arg2) nounwind { ; X64: # %bb.0: ; X64-NEXT: movq %rdx, %rax ; X64-NEXT: addq %rdi, %rax +; X64-NEXT: setb %dl +; X64-NEXT: cmpb $1, %dl ; X64-NEXT: adcq %rsi, %rcx ; X64-NEXT: movq %rcx, %rdx ; X64-NEXT: retq ; ; X86-LABEL: test_add_i128: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi ; X86-NEXT: movl 
{{[0-9]+}}(%esp), %eax @@ -21,8 +24,14 @@ define i128 @test_add_i128(i128 %arg1, i128 %arg2) nounwind { ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: setb %bl +; X86-NEXT: cmpb $1, %bl ; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X86-NEXT: setb %bl +; X86-NEXT: cmpb $1, %bl ; X86-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X86-NEXT: setb %bl +; X86-NEXT: cmpb $1, %bl ; X86-NEXT: adcl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl %ecx, (%eax) ; X86-NEXT: movl %edx, 4(%eax) @@ -30,6 +39,7 @@ define i128 @test_add_i128(i128 %arg1, i128 %arg2) nounwind { ; X86-NEXT: movl %edi, 12(%eax) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi +; X86-NEXT: popl %ebx ; X86-NEXT: retl %ret = add i128 %arg1, %arg2 ret i128 %ret @@ -46,6 +56,8 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) { ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: addl {{[0-9]+}}(%esp), %eax +; X86-NEXT: setb %cl +; X86-NEXT: cmpb $1, %cl ; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X86-NEXT: retl %ret = add i64 %arg1, %arg2 diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir index ec9db78..dae2ad6 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir @@ -157,8 +157,8 @@ body: | ; X86: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) - ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] + ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[UV]], [[UV2]] + ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s8) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) ; X86-NEXT: $rax = COPY [[MV]](s64) ; X86-NEXT: RET 0 @@ -192,8 +192,8 @@ body: | ; X86-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64) ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s64) - ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] + ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[UV]], [[UV2]] + ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s8) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) ; X86-NEXT: $rax = COPY [[MV]](s64) ; X86-NEXT: RET 0 @@ -219,8 +219,8 @@ body: | ; X64-NEXT: [[DEF1:%[0-9]+]]:_(s128) = IMPLICIT_DEF ; X64-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF]](s128) ; X64-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF1]](s128) - ; X64-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; X64-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] + ; X64-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[UV]], [[UV2]] + ; X64-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s8) = 
G_UADDE [[UV1]], [[UV3]], [[UADDO1]] ; X64-NEXT: $rax = COPY [[UADDO]](s64) ; X64-NEXT: $rdx = COPY [[UADDE]](s64) ; X64-NEXT: RET 0 @@ -230,10 +230,10 @@ body: | ; X86-NEXT: [[DEF1:%[0-9]+]]:_(s128) = IMPLICIT_DEF ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s128) ; X86-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s128) - ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV4]] - ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV5]], [[UADDO1]] - ; X86-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV2]], [[UV6]], [[UADDE1]] - ; X86-NEXT: [[UADDE4:%[0-9]+]]:_(s32), [[UADDE5:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV7]], [[UADDE3]] + ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[UV]], [[UV4]] + ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s8) = G_UADDE [[UV1]], [[UV5]], [[UADDO1]] + ; X86-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s8) = G_UADDE [[UV2]], [[UV6]], [[UADDE1]] + ; X86-NEXT: [[UADDE4:%[0-9]+]]:_(s32), [[UADDE5:%[0-9]+]]:_(s8) = G_UADDE [[UV3]], [[UV7]], [[UADDE3]] ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDE2]](s32), [[UADDE4]](s32) ; X86-NEXT: $rax = COPY [[MV]](s64) diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir index 19fe5b8..470a30fd 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir @@ -25,6 +25,7 @@ body: | ; X64-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CTLZ]], [[C1]] ; X64-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C]] ; X64-NEXT: RET 0, implicit [[AND1]](s64) + ; ; X86-LABEL: name: test_ctlz35 ; X86: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx ; X86-NEXT: [[TRUNC:%[0-9]+]]:_(s35) = G_TRUNC [[COPY]](s64) @@ -46,12 +47,15 @@ body: | ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C]](s32) ; X86-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64) ; X86-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV1]](s64) - ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV6]], [[UV8]] - ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV7]], [[UV9]], [[USUBO1]] + ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s8) = G_USUBO [[UV6]], [[UV8]] + ; X86-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[USUBO1]](s8) + ; X86-NEXT: [[ZEXT2:%[0-9]+]]:_(s8) = G_ZEXT [[TRUNC1]](s1) + ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s8) = G_USUBE [[UV7]], [[UV9]], [[ZEXT2]] + ; X86-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[USUBE1]](s8) ; X86-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32) - ; X86-NEXT: [[TRUNC1:%[0-9]+]]:_(s35) = G_TRUNC [[MV2]](s64) - ; X86-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s35) - ; X86-NEXT: RET 0, implicit [[ZEXT2]](s64) + ; X86-NEXT: [[TRUNC3:%[0-9]+]]:_(s35) = G_TRUNC [[MV2]](s64) + ; X86-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC3]](s35) + ; X86-NEXT: RET 0, implicit [[ZEXT3]](s64) %0(s64) = COPY $rdx %1:_(s35) = G_TRUNC %0(s64) %2:_(s35) = G_CTLZ %1 @@ -97,6 +101,7 @@ body: | ; X64-NEXT: 
[[CTLZ:%[0-9]+]]:_(s64) = G_CTLZ [[DEF]](s64) ; X64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[CTLZ]](s64) ; X64-NEXT: RET 0, implicit [[COPY]](s64) + ; ; X86-LABEL: name: test_ctlz64 ; X86: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64) diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir index ee2b9ee..ac3bf33 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir @@ -157,8 +157,8 @@ body: | ; X86: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) - ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]] - ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]] + ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s8) = G_USUBO [[UV]], [[UV2]] + ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s8) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]] ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32) ; X86-NEXT: $rax = COPY [[MV]](s64) ; X86-NEXT: RET 0 @@ -192,8 +192,8 @@ body: | ; X86-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64) ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s64) - ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]] - ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]] + ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s8) = G_USUBO [[UV]], [[UV2]] + ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s8) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]] ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32) ; X86-NEXT: $rax = COPY [[MV]](s64) ; X86-NEXT: RET 0 @@ -219,8 +219,8 @@ body: | ; X64-NEXT: [[DEF1:%[0-9]+]]:_(s128) = IMPLICIT_DEF ; X64-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF]](s128) ; X64-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF1]](s128) - ; X64-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]] - ; X64-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]] + ; X64-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s8) = G_USUBO [[UV]], [[UV2]] + ; X64-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s8) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]] ; X64-NEXT: $rax = COPY [[USUBO]](s64) ; X64-NEXT: $rdx = COPY [[USUBE]](s64) ; X64-NEXT: RET 0 @@ -230,10 +230,10 @@ body: | ; X86-NEXT: [[DEF1:%[0-9]+]]:_(s128) = IMPLICIT_DEF ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s128) ; X86-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s128) - ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV4]] - ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV5]], [[USUBO1]] - ; X86-NEXT: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV2]], [[UV6]], [[USUBE1]] - ; 
X86-NEXT: [[USUBE4:%[0-9]+]]:_(s32), [[USUBE5:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[UV7]], [[USUBE3]] + ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s8) = G_USUBO [[UV]], [[UV4]] + ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s8) = G_USUBE [[UV1]], [[UV5]], [[USUBO1]] + ; X86-NEXT: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s8) = G_USUBE [[UV2]], [[UV6]], [[USUBE1]] + ; X86-NEXT: [[USUBE4:%[0-9]+]]:_(s32), [[USUBE5:%[0-9]+]]:_(s8) = G_USUBE [[UV3]], [[UV7]], [[USUBE3]] ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32) ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBE2]](s32), [[USUBE4]](s32) ; X86-NEXT: $rax = COPY [[MV]](s64) diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir index 9807d13..57e729f 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir @@ -32,8 +32,8 @@ body: | ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[OR]](s32), [[C]] ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR1]](s32) ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 - ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C2]] - ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]] + ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C2]] + ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s8) = G_UADDE [[C]], [[C]], [[UADDO1]] ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR]](s32) ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8) ; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 @@ -97,8 +97,8 @@ body: | ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[UV]](s32), [[C]] ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV1]](s32) ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 - ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C1]] - ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]] + ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C1]] + ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s8) = G_UADDE [[C]], [[C]], [[UADDO1]] ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV]](s32) ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8) ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir index e2d10423..f5d8477 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir @@ -32,8 +32,8 @@ body: | ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[OR]](s32), [[C]] ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR1]](s32) ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 - ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C2]] - ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]] + ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C2]] + 
; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s8) = G_UADDE [[C]], [[C]], [[UADDO1]] ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR]](s32) ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8) ; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 @@ -99,8 +99,8 @@ body: | ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[UV]](s32), [[C]] ; X86-NEXT: [[CTTZ:%[0-9]+]]:_(s32) = G_CTTZ [[UV1]](s32) ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 - ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ]], [[C1]] - ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]] + ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s8) = G_UADDO [[CTTZ]], [[C1]] + ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s8) = G_UADDE [[C]], [[C]], [[UADDO1]] ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV]](s32) ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8) ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 diff --git a/llvm/test/CodeGen/X86/GlobalISel/pr49087.ll b/llvm/test/CodeGen/X86/GlobalISel/pr49087.ll new file mode 100644 index 0000000..41d890b --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/pr49087.ll @@ -0,0 +1,50 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -o - -global-isel -global-isel-abort=1 < %s 2>&1 | FileCheck %s + +define i32 @test_01(ptr %p, i64 %len, i32 %x) { +; CHECK-LABEL: test_01: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movl $1, %eax +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: .LBB0_1: # %loop +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: subq %rax, %rsi +; CHECK-NEXT: setb %cl +; CHECK-NEXT: testb $1, %cl +; CHECK-NEXT: jne .LBB0_4 +; CHECK-NEXT: # %bb.2: # %backedge +; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1 +; CHECK-NEXT: imulq $4, %rsi, %rcx +; CHECK-NEXT: addq %rdi, %rcx +; CHECK-NEXT: cmpl %edx, (%rcx) +; CHECK-NEXT: sete %cl +; CHECK-NEXT: testb $1, %cl +; CHECK-NEXT: je .LBB0_1 +; CHECK-NEXT: # %bb.3: # %failure +; CHECK-NEXT: .LBB0_4: # %exit +; CHECK-NEXT: movl $-1, %eax +; CHECK-NEXT: retq + +entry: + %scevgep = getelementptr i32, ptr %p, i64 -1 + br label %loop + +loop: ; preds = %backedge, %entry + %iv = phi i64 [ %iv.next, %backedge ], [ %len, %entry ] + %iv.next = add i64 %iv, -1 + %cond_1 = icmp eq i64 %iv, 0 + br i1 %cond_1, label %exit, label %backedge + +backedge: ; preds = %loop + %scevgep1 = getelementptr i32, ptr %scevgep, i64 %iv + %loaded = load atomic i32, ptr %scevgep1 unordered, align 4 + %cond_2 = icmp eq i32 %loaded, %x + br i1 %cond_2, label %failure, label %loop + +exit: ; preds = %loop + ret i32 -1 + +failure: + unreachable +} + diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir index 8eac3eaf..76680ac 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir @@ -29,8 +29,8 @@ body: | bb.0 (%ir-block.0): %0(s32) = IMPLICIT_DEF %1(s32) = IMPLICIT_DEF - %2(s1) = IMPLICIT_DEF - %3(s32), %4(s1) = G_UADDE %0, %1, %2 + %2(s8) = IMPLICIT_DEF + %3(s32), %4(s8) = G_UADDE %0, %1, %2 RET 0 ... 
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir index 773813f..b85180f 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-add-x32.mir @@ -27,25 +27,24 @@ body: | bb.0 (%ir-block.0): ; X32-LABEL: name: test_add_i64 ; X32: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF - ; X32: [[DEF1:%[0-9]+]]:gr32 = IMPLICIT_DEF - ; X32: [[DEF2:%[0-9]+]]:gr32 = IMPLICIT_DEF - ; X32: [[DEF3:%[0-9]+]]:gr32 = IMPLICIT_DEF - ; X32: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def $eflags - ; X32: [[COPY:%[0-9]+]]:gr32 = COPY $eflags - ; X32: $eflags = COPY [[COPY]] - ; X32: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def $eflags, implicit $eflags - ; X32: [[COPY1:%[0-9]+]]:gr32 = COPY $eflags - ; X32: $eax = COPY [[ADD32rr]] - ; X32: $edx = COPY [[ADC32rr]] - ; X32: RET 0, implicit $eax, implicit $edx + ; X32-NEXT: [[DEF1:%[0-9]+]]:gr32 = IMPLICIT_DEF + ; X32-NEXT: [[DEF2:%[0-9]+]]:gr32 = IMPLICIT_DEF + ; X32-NEXT: [[DEF3:%[0-9]+]]:gr32 = IMPLICIT_DEF + ; X32-NEXT: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def $eflags + ; X32-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags + ; X32-NEXT: CMP8ri [[SETCCr]], 1, implicit-def $eflags + ; X32-NEXT: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def $eflags, implicit $eflags + ; X32-NEXT: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags + ; X32-NEXT: $eax = COPY [[ADD32rr]] + ; X32-NEXT: $edx = COPY [[ADC32rr]] + ; X32-NEXT: RET 0, implicit $eax, implicit $edx %0(s32) = IMPLICIT_DEF %1(s32) = IMPLICIT_DEF %2(s32) = IMPLICIT_DEF %3(s32) = IMPLICIT_DEF %9(s8) = G_CONSTANT i8 0 - %4(s1) = G_TRUNC %9(s8) - %5(s32), %6(s1) = G_UADDE %0, %2, %4 - %7(s32), %8(s1) = G_UADDE %1, %3, %6 + %5(s32), %6(s8) = G_UADDE %0, %2, %9 + %7(s32), %8(s8) = G_UADDE %1, %3, %6 $eax = COPY %5(s32) $edx = COPY %7(s32) RET 0, implicit $eax, implicit $edx diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-get-carry-bit.ll b/llvm/test/CodeGen/X86/GlobalISel/select-get-carry-bit.ll new file mode 100644 index 0000000..0cf1372 --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/select-get-carry-bit.ll @@ -0,0 +1,21 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -global-isel=1 -global-isel-abort=1 | FileCheck %s + +; Issue #120029 +define i16 @use_carry_bit(i16 %2) { +; CHECK-LABEL: use_carry_bit: +; CHECK: # %bb.0: +; CHECK-NEXT: movw $1, %ax +; CHECK-NEXT: xorl %ecx, %ecx +; CHECK-NEXT: addw %di, %ax +; CHECK-NEXT: setb %cl +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: cmovnew %di, %ax +; CHECK-NEXT: retq + %uadd = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %2, i16 1) + %res = extractvalue { i16, i1 } %uadd, 0 + %carry = extractvalue { i16, i1 } %uadd, 1 + %ret = select i1 %carry, i16 %2, i16 %res + ret i16 %ret +} + diff --git a/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll index 7a035f5..be75d7c 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll @@ -7,12 +7,15 @@ define i128 @test_sub_i128(i128 %arg1, i128 %arg2) nounwind { ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: subq %rdx, %rax +; X64-NEXT: setb %dl +; X64-NEXT: cmpb $1, %dl ; X64-NEXT: sbbq %rcx, %rsi ; X64-NEXT: movq %rsi, %rdx ; X64-NEXT: retq ; ; X86-LABEL: test_sub_i128: ; X86: # %bb.0: +; 
X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -21,8 +24,14 @@ define i128 @test_sub_i128(i128 %arg1, i128 %arg2) nounwind { ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: setb %bl +; X86-NEXT: cmpb $1, %bl ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx +; X86-NEXT: setb %bl +; X86-NEXT: cmpb $1, %bl ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: setb %bl +; X86-NEXT: cmpb $1, %bl ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl %ecx, (%eax) ; X86-NEXT: movl %edx, 4(%eax) @@ -30,6 +39,7 @@ define i128 @test_sub_i128(i128 %arg1, i128 %arg2) nounwind { ; X86-NEXT: movl %edi, 12(%eax) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi +; X86-NEXT: popl %ebx ; X86-NEXT: retl %ret = sub i128 %arg1, %arg2 ret i128 %ret @@ -47,6 +57,8 @@ define i64 @test_sub_i64(i64 %arg1, i64 %arg2) { ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: subl {{[0-9]+}}(%esp), %eax +; X86-NEXT: setb %cl +; X86-NEXT: cmpb $1, %cl ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx ; X86-NEXT: retl %ret = sub i64 %arg1, %arg2 diff --git a/llvm/test/CodeGen/X86/absolute-symbol-kernel-code-model.ll b/llvm/test/CodeGen/X86/absolute-symbol-kernel-code-model.ll new file mode 100644 index 0000000..ce7024d --- /dev/null +++ b/llvm/test/CodeGen/X86/absolute-symbol-kernel-code-model.ll @@ -0,0 +1,34 @@ +; RUN: llc --code-model=kernel < %s -asm-verbose=0 | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" + +; CHECK-LABEL: func_no_abs_sym +define i64 @func_no_abs_sym() nounwind { + ; CHECK: movq $no_abs_sym, %rax + %1 = ptrtoint ptr @no_abs_sym to i64 + ret i64 %1 +} + +; CHECK-LABEL: func_abs_sym +define i64 @func_abs_sym() nounwind { + ; CHECK: movabsq $abs_sym, %rax + %1 = ptrtoint ptr @abs_sym to i64 + ret i64 %1 +} + +; CHECK-LABEL: func_abs_sym_in_range +define i64 @func_abs_sym_in_range() nounwind { + ;; The absolute_symbol range fits in 32 bits but we still use movabs + ;; since there's no benefit to using the sign extending instruction + ;; with absolute symbols. 
+ ; CHECK: movabsq $abs_sym_in_range, %rax + %1 = ptrtoint ptr @abs_sym_in_range to i64 + ret i64 %1 +} + +@no_abs_sym = external hidden global [0 x i8] +@abs_sym = external hidden global [0 x i8], !absolute_symbol !0 +@abs_sym_in_range = external hidden global [0 x i8], !absolute_symbol !1 + +!0 = !{i64 -1, i64 -1} ;; Full range +!1 = !{i64 -2147483648, i64 2147483648} ;; In range diff --git a/llvm/test/CodeGen/X86/apx/cf.ll b/llvm/test/CodeGen/X86/apx/cf.ll index b2651e9..de9caa5 100644 --- a/llvm/test/CodeGen/X86/apx/cf.ll +++ b/llvm/test/CodeGen/X86/apx/cf.ll @@ -230,6 +230,24 @@ entry: ret void } +define void @and_cond(i32 %a, i1 %b) { +; CHECK-LABEL: and_cond: +; CHECK: # %bb.0: +; CHECK-NEXT: testl %edi, %edi +; CHECK-NEXT: setg %al +; CHECK-NEXT: notb %sil +; CHECK-NEXT: xorl %ecx, %ecx +; CHECK-NEXT: testb %al, %sil +; CHECK-NEXT: cfcmovnel %ecx, 0 +; CHECK-NEXT: retq + %is_pos = icmp sgt i32 %a, 0 + %not_b = xor i1 %b, true + %cond = and i1 %not_b, %is_pos + %mask = insertelement <1 x i1> zeroinitializer, i1 %cond, i64 0 + call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr null, i32 1, <1 x i1> %mask) + ret void +} + define i64 @redundant_test(i64 %num, ptr %p1, i64 %in) { ; CHECK-LABEL: redundant_test: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll index 0de308a..5152c005 100644 --- a/llvm/test/CodeGen/X86/avg.ll +++ b/llvm/test/CodeGen/X86/avg.ll @@ -728,45 +728,70 @@ define void @avg_v32i8_2(ptr %a, ptr %b) nounwind { define void @avg_v64i8_2(ptr %a, ptr %b) nounwind { ; SSE2-LABEL: avg_v64i8_2: ; SSE2: # %bb.0: -; SSE2-NEXT: movaps (%rsi), %xmm0 -; SSE2-NEXT: movaps 16(%rsi), %xmm1 -; SSE2-NEXT: movaps 32(%rsi), %xmm2 -; SSE2-NEXT: movaps 48(%rsi), %xmm3 -; SSE2-NEXT: movups %xmm3, (%rax) -; SSE2-NEXT: movups %xmm2, (%rax) -; SSE2-NEXT: movups %xmm1, (%rax) -; SSE2-NEXT: movups %xmm0, (%rax) +; SSE2-NEXT: movdqa (%rdi), %xmm0 +; SSE2-NEXT: movdqa 16(%rdi), %xmm1 +; SSE2-NEXT: movdqa 32(%rdi), %xmm2 +; SSE2-NEXT: movdqa 48(%rdi), %xmm3 +; SSE2-NEXT: pavgb (%rsi), %xmm0 +; SSE2-NEXT: pavgb 16(%rsi), %xmm1 +; SSE2-NEXT: pavgb 32(%rsi), %xmm2 +; SSE2-NEXT: pavgb 48(%rsi), %xmm3 +; SSE2-NEXT: movdqu %xmm3, (%rax) +; SSE2-NEXT: movdqu %xmm2, (%rax) +; SSE2-NEXT: movdqu %xmm1, (%rax) +; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v64i8_2: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps (%rsi), %ymm0 -; AVX1-NEXT: vmovaps 32(%rsi), %ymm1 -; AVX1-NEXT: vmovups %ymm1, (%rax) -; AVX1-NEXT: vmovups %ymm0, (%rax) -; AVX1-NEXT: vzeroupper +; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 +; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3 +; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0 +; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1 +; AVX1-NEXT: vpavgb 32(%rsi), %xmm2, %xmm2 +; AVX1-NEXT: vpavgb 48(%rsi), %xmm3, %xmm3 +; AVX1-NEXT: vmovdqu %xmm3, (%rax) +; AVX1-NEXT: vmovdqu %xmm2, (%rax) +; AVX1-NEXT: vmovdqu %xmm1, (%rax) +; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v64i8_2: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovaps (%rsi), %ymm0 -; AVX2-NEXT: vmovaps 32(%rsi), %ymm1 -; AVX2-NEXT: vmovups %ymm1, (%rax) -; AVX2-NEXT: vmovups %ymm0, (%rax) +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0 +; AVX2-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1 +; AVX2-NEXT: vmovdqu %ymm1, (%rax) +; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512-LABEL: avg_v64i8_2: -; 
AVX512: # %bb.0: -; AVX512-NEXT: vmovaps (%rsi), %zmm0 -; AVX512-NEXT: vmovups %zmm0, (%rax) -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; AVX512F-LABEL: avg_v64i8_2: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0 +; AVX512F-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1 +; AVX512F-NEXT: vmovdqu %ymm1, (%rax) +; AVX512F-NEXT: vmovdqu %ymm0, (%rax) +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: avg_v64i8_2: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 +; AVX512BW-NEXT: vpavgb (%rsi), %zmm0, %zmm0 +; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq %1 = load <64 x i8>, ptr %a %2 = load <64 x i8>, ptr %b %3 = zext <64 x i8> %1 to <64 x i32> %4 = zext <64 x i8> %2 to <64 x i32> - %5 = add nuw nsw <64 x i32> %4, %4 + %5 = add nuw nsw <64 x i32> %3, %4 %6 = add nuw nsw <64 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %8 = trunc <64 x i32> %7 to <64 x i8> @@ -774,7 +799,6 @@ define void @avg_v64i8_2(ptr %a, ptr %b) nounwind { ret void } - define void @avg_v4i16_2(ptr %a, ptr %b) nounwind { ; SSE2-LABEL: avg_v4i16_2: ; SSE2: # %bb.0: diff --git a/llvm/test/CodeGen/X86/avx-shift.ll b/llvm/test/CodeGen/X86/avx-shift.ll index c9c09d7..3bce843 100644 --- a/llvm/test/CodeGen/X86/avx-shift.ll +++ b/llvm/test/CodeGen/X86/avx-shift.ll @@ -201,7 +201,7 @@ define <8 x i32> @vshift08_add(<8 x i32> %a, <8 x i32> %y) { define <4 x i32> @vshift13(<4 x i32> %in) { ; CHECK-LABEL: vshift13: ; CHECK: # %bb.0: -; CHECK-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2,4,16] ; CHECK-NEXT: retq %T = shl <4 x i32> %in, <i32 0, i32 1, i32 2, i32 4> ret <4 x i32> %T diff --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll index 70b3b99..1133cdfd 100644 --- a/llvm/test/CodeGen/X86/avx2-arith.ll +++ b/llvm/test/CodeGen/X86/avx2-arith.ll @@ -199,12 +199,12 @@ define <8 x i32> @mul_const5(<8 x i32> %x) { define <8 x i32> @mul_const6(<8 x i32> %x) { ; X86-LABEL: mul_const6: ; X86: # %bb.0: -; X86-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 +; X86-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # [0,0,0,2,0,2,0,0] ; X86-NEXT: retl ; ; X64-LABEL: mul_const6: ; X64: # %bb.0: -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,0,0,2,0,2,0,0] ; X64-NEXT: retq %y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 2, i32 0, i32 2, i32 0, i32 0> ret <8 x i32> %y diff --git 
a/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll b/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll index 2aea9c1..632d90d 100644 --- a/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll +++ b/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll @@ -27,7 +27,7 @@ entry: !1 = !{i64 0, !"_ZTSFivE.generalized"} !2 = !{i64 0, !"_ZTSFviE.generalized"} -; CHECK: .section .callgraph,"o",@progbits,.text +; CHECK: .section .llvm.callgraph,"o",@progbits,.text ;; Version ; CHECK-NEXT: .byte 0 ;; Flags -- Potential indirect target so LSB is set to 1. Other bits are 0. diff --git a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll index 1aabf66..ed6849a 100644 --- a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll +++ b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll @@ -1,8 +1,8 @@ ;; Test if temporary labels are generated for each indirect callsite. -;; Test if the .callgraph section contains the MD5 hash of callees' type (type id) +;; Test if the .llvm.callgraph section contains the MD5 hash of callees' type (type id) ;; is correctly paired with its corresponding temporary label generated for indirect ;; call sites annotated with !callee_type metadata. -;; Test if the .callgraph section contains unique direct callees. +;; Test if the .llvm.callgraph section contains unique direct callees. ; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -o - < %s | FileCheck %s @@ -36,7 +36,7 @@ entry: !4 = !{!5} !5 = !{i64 0, !"_ZTSFPvS_E.generalized"} -; CHECK: .section .callgraph,"o",@progbits,.text +; CHECK: .section .llvm.callgraph,"o",@progbits,.text ;; Version ; CHECK-NEXT: .byte 0 ;; Flags diff --git a/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll index 34dc5b8..49cc335 100644 --- a/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll +++ b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll @@ -1,7 +1,10 @@ -;; Tests that we store the type identifiers in .callgraph section of the object file for tailcalls. +;; Tests that we store the type identifiers in .llvm.callgraph section of the object file for tailcalls. + +; REQUIRES: x86-registered-target +; REQUIRES: arm-registered-target ; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \ -; RUN: llvm-readelf -x .callgraph - | FileCheck %s +; RUN: llvm-readelf -x .llvm.callgraph - | FileCheck %s define i32 @check_tailcall(ptr %func, i8 %x) !type !0 { entry: @@ -27,7 +30,7 @@ declare !type !2 i32 @bar(i8 signext) !2 = !{i64 0, !"_ZTSFicE.generalized"} !3 = !{i64 0, !"_ZTSFiiE.generalized"} -; CHECK: Hex dump of section '.callgraph': +; CHECK: Hex dump of section '.llvm.callgraph': ; CHECK-NEXT: 0x00000000 00050000 00000000 00008e19 0b7f3326 ; CHECK-NEXT: 0x00000010 e3000154 86bc5981 4b8e3000 05000000 ;; Verify that the type id 0x308e4b8159bc8654 is in section. diff --git a/llvm/test/CodeGen/X86/call-graph-section.ll b/llvm/test/CodeGen/X86/call-graph-section.ll index c144a24..8a1c6ca 100644 --- a/llvm/test/CodeGen/X86/call-graph-section.ll +++ b/llvm/test/CodeGen/X86/call-graph-section.ll @@ -1,7 +1,10 @@ -;; Tests that we store the type identifiers in .callgraph section of the object file. +;; Tests that we store the type identifiers in .llvm.callgraph section of the object file. 
+ +; REQUIRES: x86-registered-target +; REQUIRES: arm-registered-target ; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \ -; RUN: llvm-readelf -x .callgraph - | FileCheck %s +; RUN: llvm-readelf -x .llvm.callgraph - | FileCheck %s declare !type !0 void @foo() @@ -31,7 +34,7 @@ entry: ;; Make sure following type IDs are in call graph section ;; 0x5eecb3e2444f731f, 0x814b8e305486bc59, 0xf897fd777ade6814 -; CHECK: Hex dump of section '.callgraph': +; CHECK: Hex dump of section '.llvm.callgraph': ; CHECK-NEXT: 0x00000000 00050000 00000000 00000000 00000000 ; CHECK-NEXT: 0x00000010 00000324 44f731f5 eecb3e54 86bc5981 ; CHECK-NEXT: 0x00000020 4b8e307a de6814f8 97fd77 diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll index ae4d24f..29c41ca 100644 --- a/llvm/test/CodeGen/X86/combine-mul.ll +++ b/llvm/test/CodeGen/X86/combine-mul.ll @@ -66,7 +66,7 @@ define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) { define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) { ; SSE-LABEL: combine_vec_mul_pow2b: ; SSE: # %bb.0: -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,16] ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_mul_pow2b: @@ -120,12 +120,12 @@ define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) { define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) { ; SSE-LABEL: combine_vec_mul_negpow2b: ; SSE: # %bb.0: -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967294,4294967292,4294967280] ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_mul_negpow2b: ; AVX: # %bb.0: -; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967294,4294967292,4294967280] ; AVX-NEXT: retq %1 = mul <4 x i32> %x, <i32 -1, i32 -2, i32 -4, i32 -16> ret <4 x i32> %1 @@ -176,12 +176,12 @@ define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) { define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) { ; SSE-LABEL: combine_vec_mul_shl_const: ; SSE: # %bb.0: -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,12,1280,458752] ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_mul_shl_const: ; AVX: # %bb.0: -; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2,12,1280,458752] ; AVX-NEXT: retq %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16> %2 = mul <4 x i32> %1, <i32 1, i32 3, i32 5, i32 7> @@ -193,7 +193,7 @@ define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) { ; SSE-LABEL: combine_vec_mul_shl_oneuse0: ; SSE: # %bb.0: ; SSE-NEXT: pmulld %xmm1, %xmm0 -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536] ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_mul_shl_oneuse0: @@ -210,7 +210,7 @@ define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) { ; SSE-LABEL: combine_vec_mul_shl_oneuse1: ; SSE: # %bb.0: ; SSE-NEXT: pmulld %xmm1, %xmm0 -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536] ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_mul_shl_oneuse1: @@ -226,7 +226,7 @@ define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> 
@combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) { ; SSE-LABEL: combine_vec_mul_shl_multiuse0: ; SSE: # %bb.0: -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536] ; SSE-NEXT: pmulld %xmm0, %xmm1 ; SSE-NEXT: paddd %xmm1, %xmm0 ; SSE-NEXT: retq @@ -246,7 +246,7 @@ define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) { ; SSE-LABEL: combine_vec_mul_shl_multiuse1: ; SSE: # %bb.0: -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,256,65536] ; SSE-NEXT: pmulld %xmm0, %xmm1 ; SSE-NEXT: paddd %xmm1, %xmm0 ; SSE-NEXT: retq @@ -268,13 +268,13 @@ define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) { ; SSE-LABEL: combine_vec_mul_add: ; SSE: # %bb.0: -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,6,2,0] ; SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_mul_add: ; AVX: # %bb.0: -; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,6,2,0] ; AVX-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: retq %1 = add <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16> diff --git a/llvm/test/CodeGen/X86/combine-multiplies.ll b/llvm/test/CodeGen/X86/combine-multiplies.ll index a5d9846..4bdf20d 100644 --- a/llvm/test/CodeGen/X86/combine-multiplies.ll +++ b/llvm/test/CodeGen/X86/combine-multiplies.ll @@ -142,9 +142,9 @@ define void @testCombineMultiplies_non_splat(<4 x i32> %v1) nounwind { ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [11,22,33,44] ; CHECK-NEXT: paddd %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [22,33,44,55] ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [33,u,55,u] ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,726,1452,2420] diff --git a/llvm/test/CodeGen/X86/combine-pmuldq.ll b/llvm/test/CodeGen/X86/combine-pmuldq.ll index 70335f8..ff5329c 100644 --- a/llvm/test/CodeGen/X86/combine-pmuldq.ll +++ b/llvm/test/CodeGen/X86/combine-pmuldq.ll @@ -204,16 +204,16 @@ define i32 @PR43159(ptr %a0) { ; SSE: # %bb.0: # %entry ; SSE-NEXT: movdqa (%rdi), %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [344322273,344322273,1916962805,1916962805] ; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: psrld $1, %xmm2 ; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7] ; SSE-NEXT: psubd %xmm1, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,2147483648,2147483648] ; SSE-NEXT: paddd %xmm1, %xmm0 ; SSE-NEXT: psrld $7, %xmm0 -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 
# [1645975491,344322273,2164392969,1916962805] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; SSE-NEXT: psrld $6, %xmm1 ; SSE-NEXT: movd %xmm1, %edi @@ -226,15 +226,15 @@ define i32 @PR43159(ptr %a0) { ; AVX1: # %bb.0: # %entry ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,344322273,1916962805,1916962805] ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [2147483648,2147483648,2147483648,2147483648] ; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpsrld $7, %xmm1, %xmm1 ; AVX1-NEXT: vpsrld $1, %xmm0, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1645975491,344322273,2164392969,1916962805] ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; AVX1-NEXT: vpsrld $6, %xmm0, %xmm0 ; AVX1-NEXT: vmovd %xmm0, %edi @@ -247,9 +247,9 @@ define i32 @PR43159(ptr %a0) { ; AVX2: # %bb.0: # %entry ; AVX2-NEXT: vmovdqa (%rdi), %xmm0 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,u,1916962805,u] ; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 -; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1645975491,344322273,2164392969,1916962805] ; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] ; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 @@ -270,9 +270,9 @@ define i32 @PR43159(ptr %a0) { ; AVX512VL: # %bb.0: # %entry ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,u,1916962805,u] ; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 -; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1645975491,344322273,2164392969,1916962805] ; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] ; AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 @@ -293,9 +293,9 @@ define i32 @PR43159(ptr %a0) { ; AVX512DQVL: # %bb.0: # %entry ; AVX512DQVL-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [344322273,u,1916962805,u] ; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 -; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1645975491,344322273,2164392969,1916962805] ; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3] ; 
AVX512DQVL-NEXT: vpsubd %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/combine-rotates.ll b/llvm/test/CodeGen/X86/combine-rotates.ll index 65d74c8..e7152ec 100644 --- a/llvm/test/CodeGen/X86/combine-rotates.ll +++ b/llvm/test/CodeGen/X86/combine-rotates.ll @@ -10,9 +10,9 @@ define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) { ; SSE2-LABEL: combine_vec_rot_rot: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [524288,131072,32768,8192] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [131072,u,8192,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll index 6bcbfe1..f7baee9 100644 --- a/llvm/test/CodeGen/X86/combine-sdiv.ll +++ b/llvm/test/CodeGen/X86/combine-sdiv.ll @@ -2927,7 +2927,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) { ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] ; SSE2-NEXT: pxor %xmm3, %xmm3 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] -; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,0,0,0,0,0,0,37632] +; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147] ; SSE2-NEXT: psrlw $8, %xmm3 ; SSE2-NEXT: packuswb %xmm3, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm0 @@ -2947,7 +2947,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) { ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,0,37632] +; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147] ; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: packuswb %xmm2, %xmm1 ; SSE41-NEXT: paddb %xmm0, %xmm1 @@ -2971,7 +2971,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) { ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,0,0,0,0,0,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm1 @@ -3044,7 +3044,7 @@ define <16 x i8> @pr38658(<16 x i8> %x) { ; XOP: # %bb.0: ; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; XOP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; XOP-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,0,0,0,0,0,37632] +; XOP-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # 
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147] ; XOP-NEXT: vpperm {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15],xmm2[1,3,5,7,9,11,13,15] ; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOP-NEXT: vpshab {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 diff --git a/llvm/test/CodeGen/X86/combine-shl.ll b/llvm/test/CodeGen/X86/combine-shl.ll index 1ce10c37..9548967 100644 --- a/llvm/test/CodeGen/X86/combine-shl.ll +++ b/llvm/test/CodeGen/X86/combine-shl.ll @@ -88,7 +88,7 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) { ; SSE2-NEXT: pmuludq %xmm0, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,u,8192,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSE2-NEXT: movdqa %xmm1, %xmm0 @@ -97,7 +97,7 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) { ; SSE41-LABEL: combine_vec_shl_known_zero1: ; SSE41: # %bb.0: ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [65536,32768,16384,8192] ; SSE41-NEXT: retq ; ; AVX-LABEL: combine_vec_shl_known_zero1: @@ -198,16 +198,16 @@ define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) { ; SSE2-LABEL: combine_vec_shl_shl1: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,64,256,1024] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [64,u,1024,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: retq ; ; SSE41-LABEL: combine_vec_shl_shl1: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,64,256,1024] ; SSE41-NEXT: retq ; ; AVX-LABEL: combine_vec_shl_shl1: @@ -304,17 +304,17 @@ define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) { ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: psrad $16, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [131072,524288,2097152,8388608] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [524288,u,8388608,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7] ; SSE2-NEXT: psrad $16, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [33554432,134217728,536870912,2147483648] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [134217728,u,2147483648,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3] ; SSE2-NEXT: punpckldq 
{{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSE2-NEXT: movdqa %xmm2, %xmm0 @@ -323,10 +323,10 @@ define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) { ; SSE41-LABEL: combine_vec_shl_ext_shl2: ; SSE41: # %bb.0: ; SSE41-NEXT: pmovsxwd %xmm0, %xmm2 -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [131072,524288,2097152,8388608] ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; SSE41-NEXT: pmovsxwd %xmm0, %xmm1 -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [33554432,134217728,536870912,2147483648] ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: retq ; @@ -673,9 +673,9 @@ define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) { ; SSE2-LABEL: combine_vec_shl_add1: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4,u,16,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -683,7 +683,7 @@ define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) { ; ; SSE41-LABEL: combine_vec_shl_add1: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16] ; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: retq ; @@ -726,9 +726,9 @@ define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) { ; SSE2-LABEL: combine_vec_shl_or1: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4,u,16,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -736,7 +736,7 @@ define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) { ; ; SSE41-LABEL: combine_vec_shl_or1: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16] ; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: retq ; @@ -765,7 +765,7 @@ define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) { ; ; SSE41-LABEL: combine_vec_shl_mul0: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [20,20,20,20] ; SSE41-NEXT: retq ; ; AVX2-LABEL: combine_vec_shl_mul0: @@ -787,21 +787,21 @@ define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) { ; SSE2-LABEL: combine_vec_shl_mul1: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [10,24,56,128] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # 
[24,u,128,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: retq ; ; SSE41-LABEL: combine_vec_shl_mul1: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [10,24,56,128] ; SSE41-NEXT: retq ; ; AVX-LABEL: combine_vec_shl_mul1: ; AVX: # %bb.0: -; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [10,24,56,128] ; AVX-NEXT: retq %1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8> %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4> @@ -813,9 +813,9 @@ define <4 x i32> @combine_vec_add_shl_nonsplat(<4 x i32> %a0) { ; SSE2-LABEL: combine_vec_add_shl_nonsplat: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,16,32] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [8,u,32,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -823,7 +823,7 @@ define <4 x i32> @combine_vec_add_shl_nonsplat(<4 x i32> %a0) { ; ; SSE41-LABEL: combine_vec_add_shl_nonsplat: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,16,32] ; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: retq ; @@ -852,7 +852,7 @@ define <4 x i32> @combine_vec_add_shl_and_nonsplat(<4 x i32> %a0) { ; SSE2-NEXT: pmuludq %xmm0, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [8,u,32,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -863,7 +863,7 @@ define <4 x i32> @combine_vec_add_shl_and_nonsplat(<4 x i32> %a0) { ; SSE41: # %bb.0: ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7] -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,16,32] ; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll index 4b01c16..0ca79ad 100644 --- a/llvm/test/CodeGen/X86/combine-srem.ll +++ b/llvm/test/CodeGen/X86/combine-srem.ll @@ -272,7 +272,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b(<4 x i32> %x) { ; SSE-NEXT: psrad $2, %xmm2 ; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] ; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7] -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1,2,4,8] ; SSE-NEXT: psubd %xmm2, %xmm0 ; SSE-NEXT: retq ; @@ -291,7 +291,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b(<4 x i32> %x) { ; AVX1-NEXT: vpsrad $2, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] ; 
AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,2,4,8] ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -336,7 +336,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) { ; SSE-NEXT: psrld $1, %xmm1 ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967294,4294967292,4294967288,4294967280] ; SSE-NEXT: paddd %xmm1, %xmm0 ; SSE-NEXT: retq ; @@ -358,7 +358,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) { ; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [4294967294,4294967292,4294967288,4294967280] ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -368,7 +368,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) { ; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm1 ; AVX2-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [4294967294,4294967292,4294967288,4294967280] ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq %1 = srem <4 x i32> %x, <i32 -2, i32 -4, i32 -8, i32 -16> diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll index 5571519..233735d 100644 --- a/llvm/test/CodeGen/X86/combine-udiv.ll +++ b/llvm/test/CodeGen/X86/combine-udiv.ll @@ -502,11 +502,11 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) { ; SSE2-NEXT: por %xmm2, %xmm1 ; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [25645,61681,8195,9363,512,32769,32897,2] ; SSE2-NEXT: psubw %xmm1, %xmm0 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,0,0,0] ; SSE2-NEXT: paddw %xmm1, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,0,65535,65535,0] ; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,16,0,8,8,0,0,0,0,0,2,0,2,0,0,0] ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -517,7 +517,7 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) { ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7] ; SSE41-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [25645,61681,8195,9363,512,32769,32897,2] ; SSE41-NEXT: psubw %xmm1, %xmm0 -; SSE41-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,0,0,0] ; SSE41-NEXT: paddw %xmm1, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4096,2048,8,u,u,2,2,u] ; SSE41-NEXT: pmulhuw %xmm0, %xmm1 @@ -530,7 +530,7 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) { ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7] ; AVX-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # 
[25645,61681,8195,9363,512,32769,32897,2] ; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [32768,0,0,0] ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [4096,2048,8,u,u,2,2,u] ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6],xmm0[7] @@ -541,7 +541,7 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) { ; XOP-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; XOP-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [25645,61681,8195,9363,512,32769,32897,2] ; XOP-NEXT: vpsubw %xmm1, %xmm0, %xmm0 -; XOP-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [32768,0,0,0] ; XOP-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; XOP-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; XOP-NEXT: retq @@ -630,7 +630,7 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; SSE2-NEXT: pand %xmm1, %xmm2 ; SSE2-NEXT: pxor %xmm3, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [171,0,0,0] ; SSE2-NEXT: psrlw $15, %xmm0 ; SSE2-NEXT: pandn %xmm0, %xmm1 ; SSE2-NEXT: por %xmm2, %xmm1 @@ -641,7 +641,7 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [171,0,0,0] ; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: packuswb %xmm2, %xmm2 ; SSE41-NEXT: psrlw $7, %xmm2 @@ -654,7 +654,7 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; AVX-LABEL: combine_vec_udiv_nonuniform4: ; AVX: # %bb.0: ; AVX-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [171,0,0,0] ; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1 ; AVX-NEXT: vpackuswb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpsrlw $7, %xmm1, %xmm1 @@ -665,14 +665,12 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; ; XOP-LABEL: combine_vec_udiv_nonuniform4: ; XOP: # %bb.0: -; XOP-NEXT: movl $171, %eax +; XOP-NEXT: movl $249, %eax ; XOP-NEXT: vmovd %eax, %xmm1 ; XOP-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; XOP-NEXT: vpmullw %xmm1, %xmm2, %xmm1 -; XOP-NEXT: vpsrlw $8, %xmm1, %xmm1 -; XOP-NEXT: movl $249, %eax -; XOP-NEXT: vmovd %eax, %xmm2 -; XOP-NEXT: vpshlb %xmm2, %xmm1, %xmm1 +; XOP-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [171,0,0,0] +; XOP-NEXT: vpsrlw $8, %xmm2, %xmm2 +; XOP-NEXT: vpshlb %xmm1, %xmm2, %xmm1 ; XOP-NEXT: vpmovsxwq {{.*#+}} xmm2 = [18446744073709551360,18446744073709551615] ; XOP-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0 ; XOP-NEXT: retq @@ -691,7 +689,7 @@ define <8 x i16> @pr38477(<8 x i16> %a0) { ; SSE2-NEXT: psubw %xmm3, %xmm0 ; 
SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [u,32768,0,0,0,0,0,32768] ; SSE2-NEXT: paddw %xmm3, %xmm0 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [u,u,0,4,0,4,16,0,4,0,0,4,0,0,0,16] ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; SSE2-NEXT: por %xmm3, %xmm0 ; SSE2-NEXT: pand %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll index 715d5c7..34c7d3d 100644 --- a/llvm/test/CodeGen/X86/combine-urem.ll +++ b/llvm/test/CodeGen/X86/combine-urem.ll @@ -327,7 +327,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) { ; SSE-NEXT: pslld $23, %xmm1 ; SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE-NEXT: cvttps2dq %xmm1, %xmm1 -; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,4,8,16] ; SSE-NEXT: pcmpeqd %xmm2, %xmm2 ; SSE-NEXT: paddd %xmm1, %xmm2 ; SSE-NEXT: pand %xmm2, %xmm0 @@ -338,7 +338,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) { ; AVX1-NEXT: vpslld $23, %xmm1, %xmm1 ; AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,4,8,16] ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/cpus-intel.ll b/llvm/test/CodeGen/X86/cpus-intel.ll index 40c38c2..646629d 100644 --- a/llvm/test/CodeGen/X86/cpus-intel.ll +++ b/llvm/test/CodeGen/X86/cpus-intel.ll @@ -38,6 +38,8 @@ ; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=lunarlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=gracemont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=pantherlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty +; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=wildcatlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty +; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=novalake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=clearwaterforest 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=i686-unknown-unknown -mcpu=diamondrapids 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty @@ -104,6 +106,8 @@ ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=lunarlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=gracemont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=pantherlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty +; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=wildcatlake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty +; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=novalake 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown 
-mcpu=clearwaterforest 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=diamondrapids 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll index 345b2b9..19b9452 100644 --- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll +++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll @@ -437,9 +437,9 @@ define <4 x i32> @shift_zext_shl2_vec(<4 x i8> %x) nounwind { ; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [512,256,128,64] ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [256,u,64,u] ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X64-NEXT: retq diff --git a/llvm/test/CodeGen/X86/funnel-shift.ll b/llvm/test/CodeGen/X86/funnel-shift.ll index df97f49..252cb33 100644 --- a/llvm/test/CodeGen/X86/funnel-shift.ll +++ b/llvm/test/CodeGen/X86/funnel-shift.ll @@ -574,9 +574,9 @@ define <4 x i32> @fshl_v4i32_undef1_cst(<4 x i32> %a0) nounwind { ; X86-SSE2-LABEL: fshl_v4i32_undef1_cst: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [512,1024,2048,4096] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [1024,u,4096,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-SSE2-NEXT: retl @@ -746,9 +746,9 @@ define <4 x i32> @fshr_v4i32_undef1_cst(<4 x i32> %a0) nounwind { ; X86-SSE2-LABEL: fshr_v4i32_undef1_cst: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [8388608,4194304,2097152,1048576] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [4194304,u,1048576,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-SSE2-NEXT: retl diff --git a/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll b/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll index a0c243b..b2b0a6d 100644 --- a/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll +++ b/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll @@ -1,18 +1,101 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" -;; A minimal test case. llc will crash if global variables already has a section -;; prefix. 
Subsequent PRs will expand on this test case to test the hotness -;; reconciliation implementation. +;; Requires asserts for -debug-only. +; REQUIRES: asserts -; RUN: not llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic \ +; RUN: rm -rf %t && split-file %s %t && cd %t + +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic \ +; RUN: -partition-static-data-sections=true \ +; RUN: -debug-only=static-data-profile-info \ +; RUN: -data-sections=true -unique-section-names=false \ +; RUN: input-with-data-access-prof-on.ll -o - 2>&1 | FileCheck %s --check-prefixes=LOG,IR + +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic \ ; RUN: -partition-static-data-sections=true \ +; RUN: -debug-only=static-data-profile-info \ ; RUN: -data-sections=true -unique-section-names=false \ -; RUN: %s -o - 2>&1 | FileCheck %s --check-prefix=ERR +; RUN: input-with-data-access-prof-off.ll -o - 2>&1 | FileCheck %s --check-prefixes=OFF + +; LOG: hot_bss has section prefix hot, the max from data access profiles as hot and PGO counters as hot +; LOG: data_unknown_hotness has section prefix <empty>, the max from data access profiles as <empty> and PGO counters as unlikely +; LOG: external_relro_array has section prefix unlikely, solely from data access profiles + +; IR: .type hot_bss,@object +; IR-NEXT: .section .bss.hot.,"aw" +; IR: .type data_unknown_hotness,@object +; IR-NEXT: .section .data,"aw" +; IR: .type external_relro_array,@object +; IR-NEXT: .section .data.rel.ro.unlikely.,"aw" + + +; OFF: .type hot_bss,@object +; OFF-NEXT: .section .bss.hot.,"aw" +; OFF: .type data_unknown_hotness,@object +; OFF-NEXT: .section .data.unlikely.,"aw" +;; Global variable section prefix metadata is not used when +;; module flag `EnableDataAccessProf` is 0, and @external_relro_array has +;; external linkage, so analysis based on PGO counters doesn't apply. +; OFF: .type external_relro_array,@object # @external_relro_array +; OFF-NEXT: .section .data.rel.ro,"aw" + +;--- input-with-data-access-prof-on.ll +; Internal vars +@hot_bss = internal global i32 0, !section_prefix !17 +@data_unknown_hotness = internal global i32 1 +; External vars +@external_relro_array = constant [2 x ptr] [ptr @hot_bss, ptr @data_unknown_hotness], !section_prefix !18 + +define void @cold_func() !prof !15 { + %9 = load i32, ptr @data_unknown_hotness + %11 = call i32 (...) @func_taking_arbitrary_param(i32 %9) + ret void +} + +define void @hot_func() !prof !14 { + %9 = load i32, ptr @hot_bss + %11 = call i32 (...) @func_taking_arbitrary_param(i32 %9) + ret void +} + +declare i32 @func_taking_arbitrary_param(...) 
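+; A note on the profile metadata below: !14 gives @hot_func an entry count of
+; 100000 and !15 gives @cold_func an entry count of 1, so the PGO-based
+; classification sees @hot_bss (loaded only from the hot function) as hot and
+; @data_unknown_hotness (loaded only from the cold function) as unlikely,
+; matching the LOG lines above.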
-; ERR: Global variable hot_bss already has a section prefix hot +!llvm.module.flags = !{!0, !1} +!0 = !{i32 2, !"EnableDataAccessProf", i32 1} +!1 = !{i32 1, !"ProfileSummary", !2} +!2 = !{!3, !4, !5, !6, !7, !8, !9, !10} +!3 = !{!"ProfileFormat", !"InstrProf"} +!4 = !{!"TotalCount", i64 1460183} +!5 = !{!"MaxCount", i64 849024} +!6 = !{!"MaxInternalCount", i64 32769} +!7 = !{!"MaxFunctionCount", i64 849024} +!8 = !{!"NumCounts", i64 23627} +!9 = !{!"NumFunctions", i64 3271} +!10 = !{!"DetailedSummary", !11} +!11 = !{!12, !13} +!12 = !{i32 990000, i64 166, i32 73} +!13 = !{i32 999999, i64 3, i32 1443} +!14 = !{!"function_entry_count", i64 100000} +!15 = !{!"function_entry_count", i64 1} +!16 = !{!"branch_weights", i32 1, i32 99999} +!17 = !{!"section_prefix", !"hot"} +!18 = !{!"section_prefix", !"unlikely"} + +;--- input-with-data-access-prof-off.ll +; Same as file above except that module flag `EnableDataAccessProf` has value 0. +; Internal vars @hot_bss = internal global i32 0, !section_prefix !17 +@data_unknown_hotness = internal global i32 1 +; External vars +@external_relro_array = constant [2 x ptr] [ptr @hot_bss, ptr @data_unknown_hotness], !section_prefix !18 + +define void @cold_func() !prof !15 { + %9 = load i32, ptr @data_unknown_hotness + %11 = call i32 (...) @func_taking_arbitrary_param(i32 %9) + ret void +} define void @hot_func() !prof !14 { %9 = load i32, ptr @hot_bss @@ -22,8 +105,9 @@ define void @hot_func() !prof !14 { declare i32 @func_taking_arbitrary_param(...) -!llvm.module.flags = !{!1} +!llvm.module.flags = !{!0, !1} +!0 = !{i32 2, !"EnableDataAccessProf", i32 0} !1 = !{i32 1, !"ProfileSummary", !2} !2 = !{!3, !4, !5, !6, !7, !8, !9, !10} !3 = !{!"ProfileFormat", !"InstrProf"} @@ -41,3 +125,4 @@ declare i32 @func_taking_arbitrary_param(...) !15 = !{!"function_entry_count", i64 1} !16 = !{!"branch_weights", i32 1, i32 99999} !17 = !{!"section_prefix", !"hot"} +!18 = !{!"section_prefix", !"unlikely"} diff --git a/llvm/test/CodeGen/X86/global-variable-partition.ll b/llvm/test/CodeGen/X86/global-variable-partition.ll index ce06d17..604b4fd 100644 --- a/llvm/test/CodeGen/X86/global-variable-partition.ll +++ b/llvm/test/CodeGen/X86/global-variable-partition.ll @@ -106,23 +106,31 @@ target triple = "x86_64-unknown-linux-gnu" ; UNIQ-NEXT: .section .data.unlikely.,"aw",@progbits,unique,8 ; AGG-NEXT: .section .data.unlikely.,"aw",@progbits +;; The `.section` directive is omitted for .data with -unique-section-names=false. +; See MCSectionELF::shouldOmitSectionDirective for the implementation details. + ; For @data_with_unknown_hotness ; SYM: .type .Ldata_with_unknown_hotness,@object # @data_with_unknown_hotness ; SYM: .section .data..Ldata_with_unknown_hotness,"aw",@progbits ; UNIQ: .section .data,"aw",@progbits,unique,9 -; The `.section` directive is omitted for .data with -unique-section-names=false. -; See MCSectionELF::shouldOmitSectionDirective for the implementation details. + ; AGG: .data ; COMMON: .Ldata_with_unknown_hotness: -; For @hot_data_custom_bar_section -; It has an explicit section attribute 'var' and shouldn't have hot or unlikely suffix. 
+; For variables that are not eligible for section prefix annotation ; COMMON: .type hot_data_custom_bar_section,@object ; SYM-NEXT: .section bar,"aw",@progbits ; SYM: hot_data_custom_bar_section ; UNIQ: .section bar,"aw",@progbits ; AGG: .section bar,"aw",@progbits +; SYM: .section .data.llvm.fake_var,"aw" +; UNIQ: .section .data,"aw" +; AGG: .data + +;; No section for linker declaration +; COMMON-NOT: qux + @.str = private unnamed_addr constant [5 x i8] c"hot\09\00", align 1 @.str.1 = private unnamed_addr constant [10 x i8] c"%d\09%d\09%d\0A\00", align 1 @hot_relro_array = internal constant [2 x ptr] [ptr @bss2, ptr @data3] @@ -137,6 +145,8 @@ target triple = "x86_64-unknown-linux-gnu" @data3 = internal global i32 3 @data_with_unknown_hotness = private global i32 5 @hot_data_custom_bar_section = internal global i32 101 #0 +@llvm.fake_var = internal global i32 123 +@qux = external global i64 define void @cold_func(i32 %0) !prof !15 { %2 = load i32, ptr @cold_bss diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll index 1a2aac6..b45d01e 100644 --- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll +++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll @@ -499,9 +499,9 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind { ; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [0,1,16776960,2147483648] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [1,u,2147483648,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; X86-SSE2-NEXT: pand %xmm1, %xmm0 @@ -524,9 +524,9 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind { ; X64-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,1,16776960,2147483648] ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1,u,2147483648,u] ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; X64-SSE2-NEXT: pand %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/known-pow2.ll b/llvm/test/CodeGen/X86/known-pow2.ll index e183bbc..019bca7 100644 --- a/llvm/test/CodeGen/X86/known-pow2.ll +++ b/llvm/test/CodeGen/X86/known-pow2.ll @@ -28,16 +28,16 @@ define <4 x i32> @pow2_non_splat_vec_fail0(<4 x i32> %x) { ; CHECK-NEXT: pmuludq %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1073741824,u,67108864,u] ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3] ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; CHECK-NEXT: movdqa %xmm1, %xmm3 ; 
CHECK-NEXT: psrld $1, %xmm3 ; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3] -; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [9,4,16,64] ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3] ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] -; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [4,u,64,u] ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; CHECK-NEXT: psubd %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll index bdb7c30..2a2a4a5 100644 --- a/llvm/test/CodeGen/X86/madd.ll +++ b/llvm/test/CodeGen/X86/madd.ll @@ -2057,10 +2057,10 @@ define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) { ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; SSE2-NEXT: psrad $16, %xmm2 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294934528,0,0,0] ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7] ; SSE2-NEXT: pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,0,7,0,42,0,32,0] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [32768,4294934528,0,0] ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2] ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3] ; SSE2-NEXT: paddd %xmm2, %xmm1 @@ -2071,15 +2071,15 @@ define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) { ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7] -; AVX1-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,7,42,32] +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32768,4294934528,0,0] ; AVX1-NEXT: vphaddd %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; ; AVX256-LABEL: pmaddwd_negative2: ; AVX256: # %bb.0: ; AVX256-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX256-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX256-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [32768,4294934528,0,0,1,7,42,32] ; AVX256-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX256-NEXT: vphaddd %xmm1, %xmm0, %xmm0 ; AVX256-NEXT: vzeroupper diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 4cde581..caec02e 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -4765,6 +4765,66 @@ define void @scaleidx_scatter_outofrange(<8 x float> %value, ptr %base, <8 x i32 } declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32 immarg, <8 x i1>) +define <16 x i32> @pr163023_sext(ptr %a0, <16 x i32> %a1) { +; X64-LABEL: pr163023_sext: +; X64: # %bb.0: +; X64-NEXT: kxnorw %k0, %k0, %k1 +; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; X64-NEXT: vpgatherdd (%rdi,%zmm0), %zmm1 {%k1} +; X64-NEXT: vmovdqa64 %zmm1, %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: pr163023_sext: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: kxnorw %k0, %k0, %k1 +; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; X86-NEXT: vpgatherdd (%eax,%zmm0), %zmm1 {%k1} +; X86-NEXT: vmovdqa64 %zmm1, %zmm0 +; X86-NEXT: retl 
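+; The i32 offsets are sign-extended, which matches vpgatherdd's dword-index
+; addressing (indices are sign-extended when scaled), so the extend folds into
+; a single dword-index gather, as the checks above show. The zext variant
+; below has no such fold on x86-64 and is split into two qword-index gathers.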
+ %addr.p = ptrtoint ptr %a0 to i64 + %addr.v = insertelement <1 x i64> poison, i64 %addr.p, i64 0 + %addr.splat = shufflevector <1 x i64> %addr.v, <1 x i64> poison, <16 x i32> zeroinitializer + %ofs = sext <16 x i32> %a1 to <16 x i64> + %addr = add nuw <16 x i64> %addr.splat, %ofs + %ptr = inttoptr <16 x i64> %addr to <16 x ptr> + %gather = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> %ptr, i32 4, <16 x i1> splat (i1 true), <16 x i32> poison) + ret <16 x i32> %gather +} + +define <16 x i32> @pr163023_zext(ptr %a0, <16 x i32> %a1) { +; X64-LABEL: pr163023_zext: +; X64: # %bb.0: +; X64-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero +; X64-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; X64-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero +; X64-NEXT: kxnorw %k0, %k0, %k1 +; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; X64-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; X64-NEXT: kxnorw %k0, %k0, %k2 +; X64-NEXT: vpgatherqd (%rdi,%zmm0), %ymm3 {%k2} +; X64-NEXT: vpgatherqd (%rdi,%zmm1), %ymm2 {%k1} +; X64-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0 +; X64-NEXT: retq +; +; X86-LABEL: pr163023_zext: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: kxnorw %k0, %k0, %k1 +; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; X86-NEXT: vpgatherdd (%eax,%zmm0), %zmm1 {%k1} +; X86-NEXT: vmovdqa64 %zmm1, %zmm0 +; X86-NEXT: retl + %addr.p = ptrtoint ptr %a0 to i64 + %addr.v = insertelement <1 x i64> poison, i64 %addr.p, i64 0 + %addr.splat = shufflevector <1 x i64> %addr.v, <1 x i64> poison, <16 x i32> zeroinitializer + %ofs = zext <16 x i32> %a1 to <16 x i64> + %addr = add nuw <16 x i64> %addr.splat, %ofs + %ptr = inttoptr <16 x i64> %addr to <16 x ptr> + %gather = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> %ptr, i32 4, <16 x i1> splat (i1 true), <16 x i32> poison) + ret <16 x i32> %gather +} + ; ; PR45906 ; This used to cause fast-isel to generate bad copy instructions that would diff --git a/llvm/test/CodeGen/X86/min-legal-vector-width.ll b/llvm/test/CodeGen/X86/min-legal-vector-width.ll index d752659..04f0a65 100644 --- a/llvm/test/CodeGen/X86/min-legal-vector-width.ll +++ b/llvm/test/CodeGen/X86/min-legal-vector-width.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 -mattr=prefer-256-bit | FileCheck %s --check-prefixes=CHECK,CHECK-SKX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 -mattr=prefer-256-bit | FileCheck %s --check-prefixes=CHECK,CHECK-SKX,CHECK-SKX-NOVBMI ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 -mattr=prefer-256-bit,avx512vbmi | FileCheck %s --check-prefixes=CHECK,CHECK-SKX,CHECK-SKX-VBMI ; Make sure CPUs default to prefer-256-bit. 
avx512vnni isn't interesting as it just adds an isel peephole for vpmaddwd+vpaddd ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512 @@ -883,6 +883,30 @@ define <16 x i16> @test_16f32tosb_512(ptr %ptr, <16 x i16> %passthru) "min-legal } define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="256" { +; CHECK-SKX-NOVBMI-LABEL: mul256: +; CHECK-SKX-NOVBMI: # %bb.0: +; CHECK-SKX-NOVBMI-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-SKX-NOVBMI-NEXT: vmovdqa 32(%rdi), %ymm1 +; CHECK-SKX-NOVBMI-NEXT: vmovdqa (%rsi), %ymm2 +; CHECK-SKX-NOVBMI-NEXT: vmovdqa 32(%rsi), %ymm3 +; CHECK-SKX-NOVBMI-NEXT: vpbroadcastd {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-SKX-NOVBMI-NEXT: vpand %ymm4, %ymm3, %ymm5 +; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5 +; CHECK-SKX-NOVBMI-NEXT: vpandn %ymm3, %ymm4, %ymm3 +; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 +; CHECK-SKX-NOVBMI-NEXT: vpsllw $8, %ymm1, %ymm1 +; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm5 & ymm4) +; CHECK-SKX-NOVBMI-NEXT: vpand %ymm4, %ymm2, %ymm3 +; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 +; CHECK-SKX-NOVBMI-NEXT: vpandn %ymm2, %ymm4, %ymm2 +; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0 +; CHECK-SKX-NOVBMI-NEXT: vpsllw $8, %ymm0, %ymm0 +; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm3 & ymm4) +; CHECK-SKX-NOVBMI-NEXT: vmovdqa %ymm0, (%rdx) +; CHECK-SKX-NOVBMI-NEXT: vmovdqa %ymm1, 32(%rdx) +; CHECK-SKX-NOVBMI-NEXT: vzeroupper +; CHECK-SKX-NOVBMI-NEXT: retq +; ; CHECK-SKX-VBMI-LABEL: mul256: ; CHECK-SKX-VBMI: # %bb.0: ; CHECK-SKX-VBMI-NEXT: vmovdqa (%rdi), %ymm0 @@ -960,6 +984,21 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" } define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="512" { +; CHECK-SKX-NOVBMI-LABEL: mul512: +; CHECK-SKX-NOVBMI: # %bb.0: +; CHECK-SKX-NOVBMI-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-SKX-NOVBMI-NEXT: vmovdqa64 (%rsi), %zmm1 +; CHECK-SKX-NOVBMI-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-SKX-NOVBMI-NEXT: vpandq %zmm2, %zmm1, %zmm3 +; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3 +; CHECK-SKX-NOVBMI-NEXT: vpandnq %zmm1, %zmm2, %zmm1 +; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0 +; CHECK-SKX-NOVBMI-NEXT: vpsllw $8, %zmm0, %zmm0 +; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm3 & zmm2) +; CHECK-SKX-NOVBMI-NEXT: vmovdqa64 %zmm0, (%rdx) +; CHECK-SKX-NOVBMI-NEXT: vzeroupper +; CHECK-SKX-NOVBMI-NEXT: retq +; ; CHECK-SKX-VBMI-LABEL: mul512: ; CHECK-SKX-VBMI: # %bb.0: ; CHECK-SKX-VBMI-NEXT: vmovdqa64 (%rdi), %zmm0 @@ -1137,6 +1176,14 @@ define <16 x i16> @trunc_v16i32_v16i16_zeroes(ptr %x) nounwind "min-legal-vector } define <32 x i8> @trunc_v32i16_v32i8_zeroes(ptr %x) nounwind "min-legal-vector-width"="256" { +; CHECK-SKX-NOVBMI-LABEL: trunc_v32i16_v32i8_zeroes: +; CHECK-SKX-NOVBMI: # %bb.0: +; CHECK-SKX-NOVBMI-NEXT: vpsrlw $8, 32(%rdi), %ymm0 +; CHECK-SKX-NOVBMI-NEXT: vpsrlw $8, (%rdi), %ymm1 +; CHECK-SKX-NOVBMI-NEXT: vpackuswb %ymm0, %ymm1, %ymm0 +; CHECK-SKX-NOVBMI-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; CHECK-SKX-NOVBMI-NEXT: retq +; ; CHECK-SKX-VBMI-LABEL: trunc_v32i16_v32i8_zeroes: ; CHECK-SKX-VBMI: # %bb.0: ; CHECK-SKX-VBMI-NEXT: vmovdqa (%rdi), %ymm1 @@ -1192,6 +1239,14 @@ define <16 x i16> 
@trunc_v16i32_v16i16_sign(ptr %x) nounwind "min-legal-vector-w } define <32 x i8> @trunc_v32i16_v32i8_sign(ptr %x) nounwind "min-legal-vector-width"="256" { +; CHECK-SKX-NOVBMI-LABEL: trunc_v32i16_v32i8_sign: +; CHECK-SKX-NOVBMI: # %bb.0: +; CHECK-SKX-NOVBMI-NEXT: vpsrlw $8, 32(%rdi), %ymm0 +; CHECK-SKX-NOVBMI-NEXT: vpsrlw $8, (%rdi), %ymm1 +; CHECK-SKX-NOVBMI-NEXT: vpackuswb %ymm0, %ymm1, %ymm0 +; CHECK-SKX-NOVBMI-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; CHECK-SKX-NOVBMI-NEXT: retq +; ; CHECK-SKX-VBMI-LABEL: trunc_v32i16_v32i8_sign: ; CHECK-SKX-VBMI: # %bb.0: ; CHECK-SKX-VBMI-NEXT: vmovdqa (%rdi), %ymm1 diff --git a/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll b/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll index 693d199..9729fd7 100644 --- a/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll +++ b/llvm/test/CodeGen/X86/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll @@ -100,7 +100,7 @@ define <4 x i1> @p4_vector_urem_by_const__splat(<4 x i32> %x, <4 x i32> %y) { ; SSE4-LABEL: p4_vector_urem_by_const__splat: ; SSE4: # %bb.0: ; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; SSE4-NEXT: psrld $1, %xmm0 ; SSE4-NEXT: movdqa {{.*#+}} xmm1 = [715827883,715827883,715827883,715827883] ; SSE4-NEXT: pcmpgtd %xmm0, %xmm1 @@ -128,10 +128,10 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) { ; SSE2: # %bb.0: ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,u,954437177,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,3435973837,2863311531,954437177] +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2147483648,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; SSE2-NEXT: psrlq $32, %xmm0 @@ -145,7 +145,7 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) { ; SSE4-LABEL: p5_vector_urem_by_const__nonsplat: ; SSE4: # %bb.0: ; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,3435973837,2863311531,954437177] ; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = [1,2147483648] ; SSE4-NEXT: pmuludq %xmm0, %xmm1 ; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] @@ -159,7 +159,7 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) { ; AVX2-LABEL: p5_vector_urem_by_const__nonsplat: ; AVX2: # %bb.0: ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,3435973837,2863311531,954437177] ; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ 
-196,7 +196,7 @@ define <4 x i1> @p6_vector_urem_by_const__nonsplat_undef0(<4 x i32> %x, <4 x i32 ; SSE4-LABEL: p6_vector_urem_by_const__nonsplat_undef0: ; SSE4: # %bb.0: ; SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; SSE4-NEXT: movdqa %xmm0, %xmm1 ; SSE4-NEXT: psrld $1, %xmm1 ; SSE4-NEXT: pslld $31, %xmm0 @@ -312,7 +312,7 @@ define <4 x i1> @p8_vector_urem_by_const__nonsplat_undef3(<4 x i32> %x, <4 x i32 ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; SSE4-NEXT: psrld $2, %xmm2 -; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [6,6,6,6] ; SSE4-NEXT: psubd %xmm2, %xmm0 ; SSE4-NEXT: pxor %xmm1, %xmm1 ; SSE4-NEXT: pcmpeqd %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll index 9aee2f1..00731fe 100644 --- a/llvm/test/CodeGen/X86/pmul.ll +++ b/llvm/test/CodeGen/X86/pmul.ll @@ -91,7 +91,7 @@ define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind { ; ; SSE41-LABEL: mul_v4i32c: ; SSE41: # %bb.0: # %entry -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [117,117,117,117] ; SSE41-NEXT: retq ; ; AVX-LABEL: mul_v4i32c: diff --git a/llvm/test/CodeGen/X86/pr160612.ll b/llvm/test/CodeGen/X86/pr160612.ll new file mode 100644 index 0000000..6572c42 --- /dev/null +++ b/llvm/test/CodeGen/X86/pr160612.ll @@ -0,0 +1,74 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -O2 | FileCheck %s + +; Test for issue #160612: OR conditions in branches should use multiple branches +; instead of materializing booleans with SETCC when no special optimizations apply. + +declare void @subroutine_foo() +declare void @subroutine_bar() + +; Original issue: (x == 0 || y == 0) was generating SETCC + TEST + BRANCH +; instead of using two conditional branches directly. +define void @func_a(i32 noundef %x, i32 noundef %y) { +; CHECK-LABEL: func_a: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: testl %edi, %edi +; CHECK-NEXT: je subroutine_foo@PLT # TAILCALL +; CHECK-NEXT: # %bb.1: # %entry +; CHECK-NEXT: testl %esi, %esi +; CHECK-NEXT: jne subroutine_bar@PLT # TAILCALL +; CHECK-NEXT: # %bb.2: # %if.then +; CHECK-NEXT: jmp subroutine_foo@PLT # TAILCALL +entry: + %cmp = icmp eq i32 %x, 0 + %cmp1 = icmp eq i32 %y, 0 + %or.cond = or i1 %cmp, %cmp1 + br i1 %or.cond, label %if.then, label %if.else + +if.then: + tail call void @subroutine_foo() + br label %if.end + +if.else: + tail call void @subroutine_bar() + br label %if.end + +if.end: + ret void +} + +; Reference implementation that already generated optimal code. +; This should continue to generate the same optimal code. 
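+; func_b expresses the OR as explicit control flow, so each icmp already feeds
+; its own conditional branch; func_a above should now lower to the same
+; two-branch sequence rather than SETCC + TEST + BRANCH.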
+define void @func_b(i32 noundef %x, i32 noundef %y) { +; CHECK-LABEL: func_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: testl %edi, %edi +; CHECK-NEXT: je subroutine_foo@PLT # TAILCALL +; CHECK-NEXT: # %bb.1: # %if.else +; CHECK-NEXT: testl %esi, %esi +; CHECK-NEXT: je subroutine_foo@PLT # TAILCALL +; CHECK-NEXT: # %bb.2: # %if.else3 +; CHECK-NEXT: jmp subroutine_bar@PLT # TAILCALL +entry: + %cmp = icmp eq i32 %x, 0 + br i1 %cmp, label %if.then, label %if.else + +if.then: + tail call void @subroutine_foo() + br label %if.end4 + +if.else: + %cmp1 = icmp eq i32 %y, 0 + br i1 %cmp1, label %if.then2, label %if.else3 + +if.then2: + tail call void @subroutine_foo() + br label %if.end4 + +if.else3: + tail call void @subroutine_bar() + br label %if.end4 + +if.end4: + ret void +} diff --git a/llvm/test/CodeGen/X86/pr162812.ll b/llvm/test/CodeGen/X86/pr162812.ll index 4ea3101..cec093c 100644 --- a/llvm/test/CodeGen/X86/pr162812.ll +++ b/llvm/test/CodeGen/X86/pr162812.ll @@ -34,61 +34,43 @@ define <32 x i8> @PR162812(<32 x i8> %a, <32 x i8> %mask) { ; ; SSE42-LABEL: PR162812: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqa %xmm2, %xmm5 -; SSE42-NEXT: movdqa %xmm0, %xmm2 +; SSE42-NEXT: movdqa %xmm0, %xmm4 +; SSE42-NEXT: psrlw $2, %xmm2 +; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [8224,8224,8224,8224,8224,8224,8224,8224] +; SSE42-NEXT: pand %xmm5, %xmm2 +; SSE42-NEXT: paddb %xmm2, %xmm2 +; SSE42-NEXT: paddb %xmm2, %xmm2 ; SSE42-NEXT: movdqa %xmm0, %xmm6 -; SSE42-NEXT: psllw $2, %xmm6 -; SSE42-NEXT: movdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; SSE42-NEXT: pand %xmm7, %xmm6 -; SSE42-NEXT: psrlw $2, %xmm5 -; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [8224,8224,8224,8224,8224,8224,8224,8224] -; SSE42-NEXT: pand %xmm4, %xmm5 +; SSE42-NEXT: paddb %xmm0, %xmm6 +; SSE42-NEXT: movdqa %xmm2, %xmm0 +; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm4 +; SSE42-NEXT: psrlw $2, %xmm3 +; SSE42-NEXT: pand %xmm3, %xmm5 ; SSE42-NEXT: paddb %xmm5, %xmm5 -; SSE42-NEXT: movdqa %xmm5, %xmm0 -; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm2 -; SSE42-NEXT: movdqa %xmm2, %xmm6 -; SSE42-NEXT: paddb %xmm2, %xmm6 ; SSE42-NEXT: paddb %xmm5, %xmm5 +; SSE42-NEXT: movdqa %xmm1, %xmm2 +; SSE42-NEXT: paddb %xmm1, %xmm2 ; SSE42-NEXT: movdqa %xmm5, %xmm0 -; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm2 -; SSE42-NEXT: movdqa %xmm1, %xmm5 -; SSE42-NEXT: psllw $2, %xmm5 -; SSE42-NEXT: pand %xmm7, %xmm5 -; SSE42-NEXT: psrlw $2, %xmm3 -; SSE42-NEXT: pand %xmm3, %xmm4 -; SSE42-NEXT: paddb %xmm4, %xmm4 -; SSE42-NEXT: movdqa %xmm4, %xmm0 -; SSE42-NEXT: pblendvb %xmm0, %xmm5, %xmm1 -; SSE42-NEXT: movdqa %xmm1, %xmm3 -; SSE42-NEXT: paddb %xmm1, %xmm3 -; SSE42-NEXT: paddb %xmm4, %xmm4 +; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE42-NEXT: movdqa %xmm4, %xmm0 -; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm1 -; SSE42-NEXT: movdqa %xmm2, %xmm0 ; SSE42-NEXT: retq ; ; AVX2-LABEL: PR162812: ; AVX2: # %bb.0: -; AVX2-NEXT: vpsllw $2, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 +; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 ; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 -; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: PR162812: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2 -; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm2 +; 
AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2 ; AVX512-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm1 ; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1 -; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2 ; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512-NEXT: retq diff --git a/llvm/test/CodeGen/X86/pr49087.ll b/llvm/test/CodeGen/X86/pr49087.ll deleted file mode 100644 index 1a29222..0000000 --- a/llvm/test/CodeGen/X86/pr49087.ll +++ /dev/null @@ -1,30 +0,0 @@ -; RUN: llc -mtriple=x86_64-unknown-linux-gnu -o - -global-isel < %s 2>&1 | FileCheck %s -; REQUIRES: asserts -; XFAIL: * - -define i32 @test_01(ptr %p, i64 %len, i32 %x) { -; CHECK-LABEL: test_01 - -entry: - %scevgep = getelementptr i32, ptr %p, i64 -1 - br label %loop - -loop: ; preds = %backedge, %entry - %iv = phi i64 [ %iv.next, %backedge ], [ %len, %entry ] - %iv.next = add i64 %iv, -1 - %cond_1 = icmp eq i64 %iv, 0 - br i1 %cond_1, label %exit, label %backedge - -backedge: ; preds = %loop - %scevgep1 = getelementptr i32, ptr %scevgep, i64 %iv - %loaded = load atomic i32, ptr %scevgep1 unordered, align 4 - %cond_2 = icmp eq i32 %loaded, %x - br i1 %cond_2, label %failure, label %loop - -exit: ; preds = %loop - ret i32 -1 - -failure: - unreachable -} - diff --git a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll index 885b075..59b03f8 100644 --- a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll +++ b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll @@ -9,7 +9,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) { ; AVX256BW: # %bb.0: ; AVX256BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX256BW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] -; AVX256BW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; AVX256BW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0] ; AVX256BW-NEXT: vpmullw %ymm3, %ymm2, %ymm2 ; AVX256BW-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX256BW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] diff --git a/llvm/test/CodeGen/X86/relptr-rodata.ll b/llvm/test/CodeGen/X86/relptr-rodata.ll index ea22b08..954ea8f 100644 --- a/llvm/test/CodeGen/X86/relptr-rodata.ll +++ b/llvm/test/CodeGen/X86/relptr-rodata.ll @@ -10,16 +10,31 @@ target triple = "x86_64-unknown-linux-gnu" ; CHECK: .long hidden-rodata @rodata = hidden constant i32 trunc (i64 sub (i64 ptrtoint (ptr @hidden to i64), i64 ptrtoint (ptr @rodata to i64)) to i32) +; CHECK: .section .rodata.rodata_ptrtoaddr +; CHECK: rodata_ptrtoaddr: +; CHECK: .long hidden-rodata_ptrtoaddr +@rodata_ptrtoaddr = hidden constant i32 trunc (i64 sub (i64 ptrtoaddr (ptr @hidden to i64), i64 ptrtoaddr (ptr @rodata_ptrtoaddr to i64)) to i32) + ; CHECK: .section .data.rel.ro.relro1 ; CHECK: relro1: ; CHECK: .long default-relro1 @relro1 = hidden constant i32 trunc (i64 sub (i64 ptrtoint (ptr @default 
to i64), i64 ptrtoint (ptr @relro1 to i64)) to i32) +; CHECK: .section .data.rel.ro.relro1_ptrtoaddr +; CHECK: relro1_ptrtoaddr: +; CHECK: .long default-relro1_ptrtoaddr +@relro1_ptrtoaddr = hidden constant i32 trunc (i64 sub (i64 ptrtoaddr (ptr @default to i64), i64 ptrtoaddr (ptr @relro1_ptrtoaddr to i64)) to i32) + ; CHECK: .section .data.rel.ro.relro2 ; CHECK: relro2: ; CHECK: .long hidden-relro2 @relro2 = constant i32 trunc (i64 sub (i64 ptrtoint (ptr @hidden to i64), i64 ptrtoint (ptr @relro2 to i64)) to i32) +; CHECK: .section .data.rel.ro.relro2_ptrtoaddr +; CHECK: relro2_ptrtoaddr: +; CHECK: .long hidden-relro2_ptrtoaddr +@relro2_ptrtoaddr = constant i32 trunc (i64 sub (i64 ptrtoaddr (ptr @hidden to i64), i64 ptrtoaddr (ptr @relro2_ptrtoaddr to i64)) to i32) + ; CHECK: .section .rodata.obj ; CHECK-NEXT: .globl obj ; CHECK: obj: diff --git a/llvm/test/CodeGen/X86/rotate-extract-vector.ll b/llvm/test/CodeGen/X86/rotate-extract-vector.ll index 1ead3f9..7d0ec64 100644 --- a/llvm/test/CodeGen/X86/rotate-extract-vector.ll +++ b/llvm/test/CodeGen/X86/rotate-extract-vector.ll @@ -149,19 +149,12 @@ define <32 x i16> @illegal_no_extract_mul(<32 x i16> %i) nounwind { ; Result would undershift define <4 x i64> @no_extract_shl(<4 x i64> %i) nounwind { -; X86-LABEL: no_extract_shl: -; X86: # %bb.0: -; X86-NEXT: vpsllq $24, %ymm0, %ymm1 -; X86-NEXT: vpsrlq $39, %ymm0, %ymm0 -; X86-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %ymm1, %ymm0 -; X86-NEXT: retl -; -; X64-LABEL: no_extract_shl: -; X64: # %bb.0: -; X64-NEXT: vpsllq $24, %ymm0, %ymm1 -; X64-NEXT: vpsrlq $39, %ymm0, %ymm0 -; X64-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0 -; X64-NEXT: retq +; CHECK-LABEL: no_extract_shl: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllq $24, %ymm0, %ymm1 +; CHECK-NEXT: vpsrlq $39, %ymm0, %ymm0 +; CHECK-NEXT: vpternlogq {{.*#+}} ymm0 = (ymm0 & m64bcst) | ymm1 +; CHECK-NEXT: ret{{[l|q]}} %lhs_mul = shl <4 x i64> %i, <i64 11, i64 11, i64 11, i64 11> %rhs_mul = shl <4 x i64> %i, <i64 24, i64 24, i64 24, i64 24> %lhs_shift = lshr <4 x i64> %lhs_mul, <i64 50, i64 50, i64 50, i64 50> @@ -171,19 +164,12 @@ define <4 x i64> @no_extract_shl(<4 x i64> %i) nounwind { ; Result would overshift define <4 x i32> @no_extract_shrl(<4 x i32> %i) nounwind { -; X86-LABEL: no_extract_shrl: -; X86: # %bb.0: -; X86-NEXT: vpsrld $9, %xmm0, %xmm1 -; X86-NEXT: vpslld $25, %xmm0, %xmm0 -; X86-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm1, %xmm0 -; X86-NEXT: retl -; -; X64-LABEL: no_extract_shrl: -; X64: # %bb.0: -; X64-NEXT: vpsrld $9, %xmm0, %xmm1 -; X64-NEXT: vpslld $25, %xmm0, %xmm0 -; X64-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0 -; X64-NEXT: retq +; CHECK-LABEL: no_extract_shrl: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsrld $9, %xmm0, %xmm1 +; CHECK-NEXT: vpslld $25, %xmm0, %xmm0 +; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = (xmm0 & m32bcst) | xmm1 +; CHECK-NEXT: ret{{[l|q]}} %lhs_div = lshr <4 x i32> %i, <i32 3, i32 3, i32 3, i32 3> %rhs_div = lshr <4 x i32> %i, <i32 9, i32 9, i32 9, i32 9> %lhs_shift = shl <4 x i32> %lhs_div, <i32 28, i32 28, i32 28, i32 28> diff --git a/llvm/test/CodeGen/X86/sdiv-exact.ll b/llvm/test/CodeGen/X86/sdiv-exact.ll index 4568191..7873ffa 100644 --- a/llvm/test/CodeGen/X86/sdiv-exact.ll +++ b/llvm/test/CodeGen/X86/sdiv-exact.ll @@ -87,7 +87,7 @@ define <4 x i32> @test5(<4 x i32> %x) { ; X86-NEXT: pmuludq %xmm1, %xmm0 ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3] -; X86-NEXT: 
pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [2863311531,u,3264175145,u] ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-NEXT: retl @@ -95,7 +95,7 @@ define <4 x i32> @test5(<4 x i32> %x) { ; X64-LABEL: test5: ; X64: # %bb.0: ; X64-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3264175145,3264175145] ; X64-NEXT: retq %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25> ret <4 x i32> %div @@ -112,7 +112,7 @@ define <4 x i32> @test6(<4 x i32> %x) { ; X86-NEXT: pmuludq %xmm0, %xmm1 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,3,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2863311531,u,3303820997,u] ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; X86-NEXT: movdqa %xmm1, %xmm0 @@ -121,7 +121,7 @@ define <4 x i32> @test6(<4 x i32> %x) { ; X64-LABEL: test6: ; X64: # %bb.0: ; X64-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3303820997,3303820997] ; X64-NEXT: retq %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26> ret <4 x i32> %div @@ -131,16 +131,16 @@ define <4 x i32> @test7(<4 x i32> %x) { ; X86-LABEL: test7: ; X86: # %bb.0: ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [3264175145,3264175145,1749801491,1749801491] ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [3264175145,u,1749801491,u] ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-NEXT: retl ; ; X64-LABEL: test7: ; X64: # %bb.0: -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,1749801491,1749801491] ; X64-NEXT: retq %div = sdiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27> ret <4 x i32> %div @@ -156,7 +156,7 @@ define <4 x i32> @test8(<4 x i32> %x) { ; X86-NEXT: pmuludq %xmm1, %xmm0 ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [1,u,2863311531,u] ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-NEXT: retl @@ -164,7 +164,7 @@ define <4 x i32> @test8(<4 x i32> %x) { ; X64-LABEL: test8: ; X64: # %bb.0: ; X64-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,2863311531,2863311531] ; X64-NEXT: retq %div = sdiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24> ret <4 x i32> %div diff --git a/llvm/test/CodeGen/X86/setcc-wide-types.ll b/llvm/test/CodeGen/X86/setcc-wide-types.ll index 
5aa266d..d018c53 100644 --- a/llvm/test/CodeGen/X86/setcc-wide-types.ll +++ b/llvm/test/CodeGen/X86/setcc-wide-types.ll @@ -1447,3 +1447,175 @@ define i1 @eq_i512_load_arg(ptr%p, i512 %b) { %r = icmp eq i512 %a, %b ret i1 %r } + +; Tests for any/allbits from memory. + +define i1 @anybits_i128_load_arg(ptr %w) { +; ANY-LABEL: anybits_i128_load_arg: +; ANY: # %bb.0: +; ANY-NEXT: movq (%rdi), %rax +; ANY-NEXT: orq 8(%rdi), %rax +; ANY-NEXT: setne %al +; ANY-NEXT: retq + %ld = load i128, ptr %w + %cmp = icmp ne i128 %ld, 0 + ret i1 %cmp +} + +define i1 @allbits_i128_load_arg(ptr %w) { +; SSE2-LABEL: allbits_i128_load_arg: +; SSE2: # %bb.0: +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: pcmpeqb (%rdi), %xmm0 +; SSE2-NEXT: pmovmskb %xmm0, %eax +; SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; SSE2-NEXT: sete %al +; SSE2-NEXT: retq +; +; SSE41-LABEL: allbits_i128_load_arg: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa (%rdi), %xmm0 +; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE41-NEXT: ptest %xmm1, %xmm0 +; SSE41-NEXT: setb %al +; SSE41-NEXT: retq +; +; AVXANY-LABEL: allbits_i128_load_arg: +; AVXANY: # %bb.0: +; AVXANY-NEXT: vmovdqa (%rdi), %xmm0 +; AVXANY-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVXANY-NEXT: vptest %xmm1, %xmm0 +; AVXANY-NEXT: setb %al +; AVXANY-NEXT: retq + %ld = load i128, ptr %w + %cmp = icmp eq i128 %ld, -1 + ret i1 %cmp +} + +define i1 @anybits_i256_load_arg(ptr %w) { +; SSE-LABEL: anybits_i256_load_arg: +; SSE: # %bb.0: +; SSE-NEXT: movq (%rdi), %rax +; SSE-NEXT: movq 8(%rdi), %rcx +; SSE-NEXT: orq 24(%rdi), %rcx +; SSE-NEXT: orq 16(%rdi), %rax +; SSE-NEXT: orq %rcx, %rax +; SSE-NEXT: setne %al +; SSE-NEXT: retq +; +; AVXANY-LABEL: anybits_i256_load_arg: +; AVXANY: # %bb.0: +; AVXANY-NEXT: vmovdqu (%rdi), %ymm0 +; AVXANY-NEXT: vptest %ymm0, %ymm0 +; AVXANY-NEXT: setne %al +; AVXANY-NEXT: vzeroupper +; AVXANY-NEXT: retq + %ld = load i256, ptr %w + %cmp = icmp ne i256 %ld, 0 + ret i1 %cmp +} + +define i1 @allbits_i256_load_arg(ptr %w) { +; SSE-LABEL: allbits_i256_load_arg: +; SSE: # %bb.0: +; SSE-NEXT: movq (%rdi), %rax +; SSE-NEXT: movq 8(%rdi), %rcx +; SSE-NEXT: andq 24(%rdi), %rcx +; SSE-NEXT: andq 16(%rdi), %rax +; SSE-NEXT: andq %rcx, %rax +; SSE-NEXT: cmpq $-1, %rax +; SSE-NEXT: sete %al +; SSE-NEXT: retq +; +; AVX1-LABEL: allbits_i256_load_arg: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovdqu (%rdi), %ymm0 +; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1 +; AVX1-NEXT: vptest %ymm1, %ymm0 +; AVX1-NEXT: setb %al +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: allbits_i256_load_arg: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vptest %ymm1, %ymm0 +; AVX2-NEXT: setb %al +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: allbits_i256_load_arg: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu (%rdi), %ymm0 +; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512-NEXT: vptest %ymm1, %ymm0 +; AVX512-NEXT: setb %al +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %ld = load i256, ptr %w + %cmp = icmp eq i256 %ld, -1 + ret i1 %cmp +} + +define i1 @anybits_i512_load_arg(ptr %w) { +; NO512-LABEL: anybits_i512_load_arg: +; NO512: # %bb.0: +; NO512-NEXT: movq 16(%rdi), %rax +; NO512-NEXT: movq (%rdi), %rcx +; NO512-NEXT: movq 8(%rdi), %rdx +; NO512-NEXT: movq 24(%rdi), %rsi +; NO512-NEXT: orq 56(%rdi), %rsi +; NO512-NEXT: orq 40(%rdi), %rdx +; NO512-NEXT: orq %rsi, %rdx +; NO512-NEXT: orq 48(%rdi), %rax +; NO512-NEXT: orq 32(%rdi), %rcx +; NO512-NEXT: orq %rax, %rcx +; 
NO512-NEXT: orq %rdx, %rcx +; NO512-NEXT: setne %al +; NO512-NEXT: retq +; +; AVX512-LABEL: anybits_i512_load_arg: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0 +; AVX512-NEXT: kortestw %k0, %k0 +; AVX512-NEXT: setne %al +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %ld = load i512, ptr %w + %cmp = icmp ne i512 %ld, 0 + ret i1 %cmp +} + +define i1 @allbits_i512_load_arg(ptr %w) { +; NO512-LABEL: allbits_i512_load_arg: +; NO512: # %bb.0: +; NO512-NEXT: movq 16(%rdi), %rax +; NO512-NEXT: movq (%rdi), %rcx +; NO512-NEXT: movq 8(%rdi), %rdx +; NO512-NEXT: movq 24(%rdi), %rsi +; NO512-NEXT: andq 56(%rdi), %rsi +; NO512-NEXT: andq 40(%rdi), %rdx +; NO512-NEXT: andq %rsi, %rdx +; NO512-NEXT: andq 48(%rdi), %rax +; NO512-NEXT: andq 32(%rdi), %rcx +; NO512-NEXT: andq %rax, %rcx +; NO512-NEXT: andq %rdx, %rcx +; NO512-NEXT: cmpq $-1, %rcx +; NO512-NEXT: sete %al +; NO512-NEXT: retq +; +; AVX512-LABEL: allbits_i512_load_arg: +; AVX512: # %bb.0: +; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 = -1 +; AVX512-NEXT: vpcmpneqd (%rdi), %zmm0, %k0 +; AVX512-NEXT: kortestw %k0, %k0 +; AVX512-NEXT: sete %al +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %ld = load i512, ptr %w + %cmp = icmp eq i512 %ld, -1 + ret i1 %cmp +} diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll index e53eed4..504a392 100644 --- a/llvm/test/CodeGen/X86/shrink_vmul.ll +++ b/llvm/test/CodeGen/X86/shrink_vmul.ll @@ -1760,7 +1760,7 @@ define void @mul_2xi16_varconst1(ptr nocapture readonly %a, i64 %index) { ; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [0,65535,u,u] ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1781,7 +1781,7 @@ define void @mul_2xi16_varconst1(ptr nocapture readonly %a, i64 %index) { ; X64-AVX-NEXT: movq c(%rip), %rax ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,65535,u,u] ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4) ; X64-AVX-NEXT: retq entry: @@ -1864,7 +1864,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) { ; X86-SSE-NEXT: movl c, %edx ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7] -; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [65536,65536,65536,65536] ; X86-SSE-NEXT: psllq $32, %xmm0 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: retl @@ -1876,7 +1876,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) { ; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [0,65536,u,u] ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1885,7 +1885,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) { 
 ; X64-SSE-NEXT: movq c(%rip), %rax
 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
-; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [65536,65536,65536,65536]
 ; X64-SSE-NEXT: psllq $32, %xmm0
 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
 ; X64-SSE-NEXT: retq
@@ -1895,7 +1895,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) {
 ; X64-AVX-NEXT: movq c(%rip), %rax
 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,65536,u,u]
 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT: retq
 entry:
@@ -1922,7 +1922,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
 ; X86-SSE-NEXT: movl c, %edx
 ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-SSE-NEXT: psrad $16, %xmm0
-; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [32768,32768,32768,32768]
 ; X86-SSE-NEXT: psllq $32, %xmm0
 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
 ; X86-SSE-NEXT: retl
@@ -1934,7 +1934,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
 ; X86-AVX-NEXT: movl c, %edx
 ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
-; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [0,32768,u,u]
 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
 ; X86-AVX-NEXT: retl
 ;
@@ -1943,7 +1943,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
 ; X64-SSE-NEXT: movq c(%rip), %rax
 ; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-SSE-NEXT: psrad $16, %xmm0
-; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,32768,32768,32768]
 ; X64-SSE-NEXT: psllq $32, %xmm0
 ; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
 ; X64-SSE-NEXT: retq
@@ -1953,7 +1953,7 @@ define void @mul_2xi16_varconst4(ptr nocapture readonly %a, i64 %index) {
 ; X64-AVX-NEXT: movq c(%rip), %rax
 ; X64-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,32768,u,u]
 ; X64-AVX-NEXT: vmovq %xmm0, (%rax,%rsi,4)
 ; X64-AVX-NEXT: retq
 entry:
diff --git a/llvm/test/CodeGen/X86/slow-pmulld.ll b/llvm/test/CodeGen/X86/slow-pmulld.ll
index 975ffd0..e8c05f9 100644
--- a/llvm/test/CodeGen/X86/slow-pmulld.ll
+++ b/llvm/test/CodeGen/X86/slow-pmulld.ll
@@ -336,13 +336,13 @@ define <4 x i32> @test_mul_v4i32_v4i16(<4 x i16> %A) {
 ; SSE4-32-LABEL: test_mul_v4i32_v4i16:
 ; SSE4-32: # %bb.0:
 ; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE4-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [18778,18778,18778,18778]
 ; SSE4-32-NEXT: retl
 ;
 ; SSE4-64-LABEL: test_mul_v4i32_v4i16:
 ; SSE4-64: # %bb.0:
 ; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE4-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [18778,18778,18778,18778]
 ; SSE4-64-NEXT: retq
 ;
 ; AVX2-SLOW-LABEL: test_mul_v4i32_v4i16:
@@ -838,13 +838,13 @@ define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
 ; SSE-32-LABEL: test_mul_v4i32_v4i16_minsize:
 ; SSE-32: # %bb.0:
 ; SSE-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-32-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [18778,18778,18778,18778]
 ; SSE-32-NEXT: retl
 ;
 ; SSE-64-LABEL: test_mul_v4i32_v4i16_minsize:
 ; SSE-64: # %bb.0:
 ; SSE-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [18778,18778,18778,18778]
 ; SSE-64-NEXT: retq
 ;
 ; AVX2-LABEL: test_mul_v4i32_v4i16_minsize:
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
index 42617c1..18588aa 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -24,7 +24,7 @@ define float @sqrt_ieee_ninf(float %f) #0 {
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
 ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
-; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
+; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = ninf afn VRSQRTSSr killed [[DEF]], [[COPY]]
 ; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
 ; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
 ; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
@@ -71,7 +71,7 @@ define float @sqrt_daz_ninf(float %f) #1 {
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
 ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
-; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
+; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = ninf afn VRSQRTSSr killed [[DEF]], [[COPY]]
 ; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
 ; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
 ; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
diff --git a/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll
index cc4bda8..650b562 100644
--- a/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/X86/srem-seteq-illegal-types.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X86
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X64
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 < %s | FileCheck %s --check-prefixes=X64,SSE2
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=X64,SSE41
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx < %s | FileCheck %s --check-prefixes=X64,AVX1
diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
index 2d07788..bb7245c 100644
--- a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
+++ b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
@@ -10,15 +10,15 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_even:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -30,10 +30,10 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_even:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -47,10 +47,10 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_even:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -63,7 +63,7 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_even:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -75,7 +75,7 @@ define <4 x i32> @test_srem_odd_even(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -109,7 +109,7 @@ define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_allones_eq:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
 ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -119,7 +119,7 @@ define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_allones_eq:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -168,7 +168,7 @@ define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_allones_ne:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
 ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -178,7 +178,7 @@ define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_allones_ne:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -234,7 +234,7 @@ define <4 x i32> @test_srem_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_even_allones_eq:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -248,7 +248,7 @@ define <4 x i32> @test_srem_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_even_allones_eq:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -308,7 +308,7 @@ define <4 x i32> @test_srem_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_even_allones_ne:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -322,7 +322,7 @@ define <4 x i32> @test_srem_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_even_allones_ne:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -367,15 +367,15 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_even_allones_eq:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -387,10 +387,10 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_even_allones_eq:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -404,10 +404,10 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_even_allones_eq:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -420,7 +420,7 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_even_allones_eq:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -432,7 +432,7 @@ define <4 x i32> @test_srem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_eq:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -448,15 +448,15 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_even_allones_ne:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -468,10 +468,10 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_even_allones_ne:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -485,10 +485,10 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_even_allones_ne:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -501,7 +501,7 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_even_allones_ne:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -514,7 +514,7 @@ define <4 x i32> @test_srem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_ne:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -534,14 +534,14 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -553,7 +553,7 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
 ; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
@@ -568,9 +568,9 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -581,7 +581,7 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -593,7 +593,7 @@ define <4 x i32> @test_srem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -611,9 +611,9 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_even_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,u,1,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -636,11 +636,11 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_even_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,268435456,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -654,11 +654,11 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_even_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,268435456,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -671,7 +671,7 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_even_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -683,7 +683,7 @@ define <4 x i32> @test_srem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -701,9 +701,9 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_even_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -711,7 +711,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [2147483648,u,1073741824,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -725,11 +725,11 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_even_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,268435456,1073741824]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -743,11 +743,11 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_even_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2147483648,268435456,1073741824]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -760,7 +760,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_even_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -772,7 +772,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -806,7 +806,7 @@ define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
 ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -816,7 +816,7 @@ define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -871,7 +871,7 @@ define <4 x i32> @test_srem_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_even_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -885,7 +885,7 @@ define <4 x i32> @test_srem_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_even_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -929,15 +929,15 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_even_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -949,10 +949,10 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_even_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -966,10 +966,10 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_even_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -982,7 +982,7 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_even_one:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -994,7 +994,7 @@ define <4 x i32> @test_srem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_one:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1018,9 +1018,9 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-NEXT: pand %xmm0, %xmm2
 ; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm2
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
@@ -1039,7 +1039,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
 ; CHECK-SSE41-NEXT: pand %xmm0, %xmm2
 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,1,858993458]
 ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -1053,7 +1053,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
@@ -1067,7 +1067,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
 ; CHECK-AVX2-NEXT: vpand %xmm2, %xmm0, %xmm2
 ; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
 ; CHECK-AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
@@ -1080,7 +1080,7 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
@@ -1102,7 +1102,7 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1137,8 +1137,8 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm2
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [2147483648,2147483648,2,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1156,11 +1156,11 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX1-LABEL: test_srem_even_INT_MIN:
 ; CHECK-AVX1: # %bb.0:
 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [2147483648,2147483648,2,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1177,7 +1177,7 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX2-LABEL: test_srem_even_INT_MIN:
 ; CHECK-AVX2: # %bb.0:
 ; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
@@ -1196,7 +1196,7 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
@@ -1219,7 +1219,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [3067833783,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1227,7 +1227,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
@@ -1253,8 +1253,8 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm2
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [1,2147483648,2,1073741824]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1272,11 +1272,11 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX1-LABEL: test_srem_odd_even_INT_MIN:
 ; CHECK-AVX1: # %bb.0:
 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [1,2147483648,2,1073741824]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
@@ -1293,7 +1293,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX2-LABEL: test_srem_odd_even_INT_MIN:
 ; CHECK-AVX2: # %bb.0:
 ; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
@@ -1312,7 +1312,7 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
@@ -1333,14 +1333,14 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_allones_and_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3435973837,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -1352,7 +1352,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_allones_and_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3435973837]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
 ; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
@@ -1367,9 +1367,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_allones_and_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3435973837]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1380,7 +1380,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_allones_and_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3435973837]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1392,7 +1392,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3435973837]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1410,9 +1410,9 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_even_allones_and_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,0,1,3067833783]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3067833783,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1420,7 +1420,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [1,u,2147483648,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -1434,11 +1434,11 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_even_allones_and_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,0,1,3067833783]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,268435456,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1452,11 +1452,11 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_even_allones_and_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,3067833783]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,1,268435456,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1469,7 +1469,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_even_allones_and_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,3067833783]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1481,7 +1481,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,3067833783]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1499,9 +1499,9 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ; CHECK-SSE2-LABEL: test_srem_odd_even_allones_and_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1509,7 +1509,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [1,u,1073741824,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
@@ -1523,11 +1523,11 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_even_allones_and_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,1,3264175145]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,268435456,1073741824]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1541,11 +1541,11 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_even_allones_and_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,268435456,1073741824]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1558,7 +1558,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_even_allones_and_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1570,7 +1570,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1604,7 +1604,7 @@ define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_allones_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,4294967295,4294967295,858993458]
 ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
@@ -1614,7 +1614,7 @@ define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_allones_and_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1669,7 +1669,7 @@ define <4 x i32> @test_srem_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_even_allones_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: psrld $1, %xmm1
@@ -1683,7 +1683,7 @@ define <4 x i32> @test_srem_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_even_allones_and_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
@@ -1727,15 +1727,15 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_even_allones_and_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,0,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,u,3264175145,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1747,10 +1747,10 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_even_allones_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,0,0,3264175145]
 ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,1,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1764,10 +1764,10 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_even_allones_and_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,0,3264175145]
 ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,1,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1780,7 +1780,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_even_allones_and_one:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,0,3264175145]
 ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -1792,7 +1792,7 @@ define <4 x i32> @test_srem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_one:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,0,3264175145]
 ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -1812,15 +1812,15 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_poweroftwo_and_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3435973837,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,268435456,1,1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1832,10 +1832,10 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_poweroftwo_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1,1] ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1849,10 +1849,10 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_srem_odd_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1,1] ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1865,7 +1865,7 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX2-LABEL: test_srem_odd_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837] ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 @@ -1877,7 +1877,7 @@ define <4 x i32> @test_srem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837] ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 @@ -1895,9 +1895,9 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_even_poweroftwo_and_one: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3067833783,u] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -1905,7 +1905,7 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm1 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3] ; CHECK-SSE2-NEXT: 
pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [268435456,u,2147483648,u] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] @@ -1919,11 +1919,11 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_srem_even_poweroftwo_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,u,2147483648,u] +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,268435456,1,2147483648] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1937,11 +1937,11 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_srem_even_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,u,2147483648,u] +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,268435456,1,2147483648] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1954,7 +1954,7 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX2-LABEL: test_srem_even_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783] ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 @@ -1966,7 +1966,7 @@ define <4 x i32> @test_srem_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783] ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 @@ -1984,15 +1984,15 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_even_poweroftwo_and_one: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3264175145,u] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,268435456,1073741824,1073741824] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -2004,10 +2004,10 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_srem_odd_even_poweroftwo_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1073741824,1073741824] ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -2021,10 +2021,10 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_srem_odd_even_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1073741824,1073741824] ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -2037,7 +2037,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX2-LABEL: test_srem_odd_even_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145] ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; 
CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 @@ -2049,7 +2049,7 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145] ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 @@ -2067,9 +2067,9 @@ define <4 x i32> @test_srem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_odd_allones_and_poweroftwo_and_one: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u] ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u] ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE2-NEXT: psrlq $32, %xmm1 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -2081,9 +2081,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-SSE41-LABEL: test_srem_odd_allones_and_poweroftwo_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,u,1,u] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u] ; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: psrlq $32, %xmm0 @@ -2096,9 +2096,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-AVX1-LABEL: test_srem_odd_allones_and_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,u,1,u] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,u,268435456,u] ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0 @@ -2110,7 +2110,7 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-AVX2-LABEL: test_srem_odd_allones_and_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,0] ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 @@ -2122,7 +2122,7 
@@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,0,1,0] ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 @@ -2138,9 +2138,9 @@ define <4 x i32> @test_srem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_srem_even_allones_and_poweroftwo_and_one: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,u,1,u] ; CHECK-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,u,268435456,u] ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE2-NEXT: psrlq $32, %xmm1 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -2152,9 +2152,9 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-SSE41-LABEL: test_srem_even_allones_and_poweroftwo_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,u,1,u] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,u,268435456,u] ; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: psrlq $32, %xmm0 @@ -2167,9 +2167,9 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-AVX1-LABEL: test_srem_even_allones_and_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,u,1,u] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,u,268435456,u] ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0 @@ -2181,7 +2181,7 @@ define <4 x i32> @test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-AVX2-LABEL: test_srem_even_allones_and_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,0] ; CHECK-AVX2-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 @@ -2193,7 +2193,7 @@ define <4 x i32> 
@test_srem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,0,1,0] ; CHECK-AVX512VL-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 @@ -2335,10 +2335,10 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX1: # %bb.0: ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; CHECK-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [34048,34048,26368,37632,21760,33024,22016,35072] +; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,133,0,133,0,103,0,147,0,85,0,129,0,86,0,137] ; CHECK-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; CHECK-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [20224,26368,6912,30976,33024,33024,33024,12032] +; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,79,0,103,0,27,0,121,0,129,0,129,0,129,0,47] ; CHECK-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; CHECK-AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm4 ; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm5 # [0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0] @@ -2369,10 +2369,10 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm4 ; CHECK-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [2304,0,10496,37632,33024,33024,21760,36096] +; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [0,9,0,0,0,41,0,147,0,129,0,129,0,85,0,141] ; CHECK-AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6 ; CHECK-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7, %xmm7 # [22016,24320,37632,11008,12544,32512,16640,37632] +; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7, %xmm7 # [0,86,0,95,0,147,0,43,0,49,0,127,0,65,0,147] ; CHECK-AVX1-NEXT: vpsrlw $8, %xmm7, %xmm7 ; CHECK-AVX1-NEXT: vpackuswb %xmm6, %xmm7, %xmm6 ; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm7 # [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0] @@ -2417,10 +2417,10 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX2: # %bb.0: ; CHECK-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; CHECK-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = 
ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31] -; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [34048,34048,26368,37632,21760,33024,22016,35072,2304,0,10496,37632,33024,33024,21760,36096] +; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,133,0,133,0,103,0,147,0,85,0,129,0,86,0,137,0,9,0,0,0,41,0,147,0,129,0,129,0,85,0,141] ; CHECK-AVX2-NEXT: vpsrlw $8, %ymm3, %ymm3 ; CHECK-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23] -; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [20224,26368,6912,30976,33024,33024,33024,12032,22016,24320,37632,11008,12544,32512,16640,37632] +; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,79,0,103,0,27,0,121,0,129,0,129,0,129,0,47,0,86,0,95,0,147,0,43,0,49,0,127,0,65,0,147] ; CHECK-AVX2-NEXT: vpsrlw $8, %ymm4, %ymm4 ; CHECK-AVX2-NEXT: vpackuswb %ymm3, %ymm4, %ymm3 ; CHECK-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm4 # [0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0] diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll index 3359202..d459d01 100644 --- a/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll +++ b/llvm/test/CodeGen/X86/srem-seteq-vec-splat.ll @@ -24,7 +24,7 @@ define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_srem_odd_25: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 @@ -34,7 +34,7 @@ define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_srem_odd_25: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -90,7 +90,7 @@ define <4 x i32> @test_srem_even_100(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_srem_even_100: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE41-NEXT: psrld $2, %xmm1 @@ -104,7 +104,7 @@ define <4 x i32> @test_srem_even_100(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_srem_even_100: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0 @@ -165,7 +165,7 @@ define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_srem_odd_neg25: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 @@ -175,7 +175,7 @@ define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_srem_odd_neg25: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -231,7 +231,7 @@ define <4 x i32> @test_srem_even_neg100(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_srem_even_neg100: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE41-NEXT: psrld $2, %xmm1 @@ -245,7 +245,7 @@ define <4 x i32> @test_srem_even_neg100(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_srem_even_neg100: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0 @@ -333,7 +333,7 @@ define <4 x i32> @test_srem_odd_undef1(<4 x i32> %X) nounwind { ; CHECK-SSE41-NEXT: psrld $31, %xmm1 ; CHECK-SSE41-NEXT: psrad $3, %xmm2 ; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [25,25,25,25] ; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0 ; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -351,7 +351,7 @@ define <4 x i32> @test_srem_odd_undef1(<4 x i32> %X) nounwind { ; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2 ; CHECK-AVX1-NEXT: vpsrad $3, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [25,25,25,25] ; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -444,7 +444,7 @@ define <4 x i32> @test_srem_even_undef1(<4 x i32> %X) nounwind { ; CHECK-SSE41-NEXT: psrld $31, %xmm1 ; CHECK-SSE41-NEXT: psrad $5, %xmm2 ; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; CHECK-SSE41-NEXT: 
pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [100,100,100,100] ; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0 ; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -462,7 +462,7 @@ define <4 x i32> @test_srem_even_undef1(<4 x i32> %X) nounwind { ; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2 ; CHECK-AVX1-NEXT: vpsrad $5, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [100,100,100,100] ; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/udiv-exact.ll b/llvm/test/CodeGen/X86/udiv-exact.ll index 271d11e..2b3f26a 100644 --- a/llvm/test/CodeGen/X86/udiv-exact.ll +++ b/llvm/test/CodeGen/X86/udiv-exact.ll @@ -87,7 +87,7 @@ define <4 x i32> @test5(<4 x i32> %x) { ; X86-NEXT: pmuludq %xmm1, %xmm0 ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [2863311531,u,3264175145,u] ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-NEXT: retl @@ -95,7 +95,7 @@ define <4 x i32> @test5(<4 x i32> %x) { ; X64-LABEL: test5: ; X64: # %bb.0: ; X64-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3264175145,3264175145] ; X64-NEXT: retq %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25> ret <4 x i32> %div @@ -112,7 +112,7 @@ define <4 x i32> @test6(<4 x i32> %x) { ; X86-NEXT: pmuludq %xmm0, %xmm1 ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,3,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2863311531,u,3303820997,u] ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; X86-NEXT: movdqa %xmm1, %xmm0 @@ -121,7 +121,7 @@ define <4 x i32> @test6(<4 x i32> %x) { ; X64-LABEL: test6: ; X64: # %bb.0: ; X64-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,3303820997,3303820997] ; X64-NEXT: retq %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26> ret <4 x i32> %div @@ -131,16 +131,16 @@ define <4 x i32> @test7(<4 x i32> %x) { ; X86-LABEL: test7: ; X86: # %bb.0: ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [3264175145,3264175145,1749801491,1749801491] ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [3264175145,u,1749801491,u] ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-NEXT: retl ; ; X64-LABEL: test7: ; X64: # %bb.0: -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # 
[3264175145,3264175145,1749801491,1749801491] ; X64-NEXT: retq %div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27> ret <4 x i32> %div @@ -156,7 +156,7 @@ define <4 x i32> @test8(<4 x i32> %x) { ; X86-NEXT: pmuludq %xmm1, %xmm0 ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3] -; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [1,u,2863311531,u] ; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-NEXT: retl @@ -164,7 +164,7 @@ define <4 x i32> @test8(<4 x i32> %x) { ; X64-LABEL: test8: ; X64: # %bb.0: ; X64-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,2863311531,2863311531] ; X64-NEXT: retq %div = udiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24> ret <4 x i32> %div diff --git a/llvm/test/CodeGen/X86/undo-mul-and.ll b/llvm/test/CodeGen/X86/undo-mul-and.ll index c9c40099..6566153 100644 --- a/llvm/test/CodeGen/X86/undo-mul-and.ll +++ b/llvm/test/CodeGen/X86/undo-mul-and.ll @@ -63,9 +63,9 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_fail_no_splat(<4 x i32> %x) { ; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec_fail_no_splat: ; CHECK-SSE: # %bb.0: ; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [56,56,56,64] ; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [56,u,64,u] ; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; CHECK-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -73,13 +73,13 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_fail_no_splat(<4 x i32> %x) { ; ; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_fail_no_splat: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,64] ; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec_fail_no_splat: ; CHECK-AVX512: # %bb.0: -; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,64] ; CHECK-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0 ; CHECK-AVX512-NEXT: retq %mul = mul <4 x i32> %x, <i32 56, i32 56, i32 56, i32 64> @@ -92,9 +92,9 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_todo_no_splat1(<4 x i32> %x) { ; CHECK-SSE-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat1: ; CHECK-SSE: # %bb.0: ; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [56,56,56,48] ; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [56,u,48,u] ; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm0 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; CHECK-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -102,13 +102,13 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_todo_no_splat1(<4 x i32> %x) { ; ; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat1: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,48] ; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; ; CHECK-AVX512-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat1: ; CHECK-AVX512: # %bb.0: -; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,48] ; CHECK-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0 ; CHECK-AVX512-NEXT: retq %mul = mul <4 x i32> %x, <i32 56, i32 56, i32 56, i32 48> @@ -131,7 +131,7 @@ define <4 x i32> @mul_and_to_neg_shl_and_vec_todo_no_splat2(<4 x i32> %x) { ; ; CHECK-AVX1-LABEL: mul_and_to_neg_shl_and_vec_todo_no_splat2: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [56,56,56,56] ; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll index 7c1a1e2..759055d 100644 --- a/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll @@ -1,6 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X86 -; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X64 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 < %s | FileCheck %s --check-prefixes=X64,SSE2 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=X64,SSE41 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx < %s | FileCheck %s --check-prefixes=X64,AVX1 @@ -168,7 +167,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind { ; SSE41-NEXT: pinsrd $1, %esi, %xmm0 ; SSE41-NEXT: pinsrd $2, %edx, %xmm0 ; SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [683,1463,819,u] ; SSE41-NEXT: pmovsxwd {{.*#+}} xmm1 = [2047,2047,2047,2047] ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: pand %xmm1, %xmm2 @@ -194,7 +193,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind { ; AVX1-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 ; AVX1-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 ; AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [683,1463,819,u] ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [2047,2047,2047,2047] ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 @@ -219,7 +218,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind { ; AVX2-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 ; AVX2-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 ; AVX2-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpmulld 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [683,1463,819,u] ; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2047,2047,2047,2047] ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 @@ -241,7 +240,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind { ; AVX512VL-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 ; AVX512VL-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 ; AVX512VL-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [683,1463,819,u] ; AVX512VL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2047,2047,2047,2047] ; AVX512VL-NEXT: vpand %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll index 838086e..2228c09 100644 --- a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll +++ b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll @@ -10,10 +10,10 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_urem_odd_even: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u] +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -26,9 +26,9 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_urem_odd_even: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,3264175145,3264175145] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824] ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -42,9 +42,9 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_urem_odd_even: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824] ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = 
xmm1[0,0,2,2] @@ -57,7 +57,7 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind { ; ; CHECK-AVX2-LABEL: test_urem_odd_even: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145] ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -68,7 +68,7 @@ define <4 x i32> @test_urem_odd_even(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_urem_odd_even: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,3264175145,3264175145] ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -87,9 +87,9 @@ define <4 x i32> @test_urem_odd_allones_eq(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_urem_odd_allones_eq: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -99,7 +99,7 @@ define <4 x i32> @test_urem_odd_allones_eq(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_urem_odd_allones_eq: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993459,858993459,1,858993459] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -108,7 +108,7 @@ define <4 x i32> @test_urem_odd_allones_eq(<4 x i32> %X) nounwind { ; ; CHECK-AVX-LABEL: test_urem_odd_allones_eq: ; CHECK-AVX: # %bb.0: -; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,4294967295,3435973837] ; CHECK-AVX-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0 @@ -122,9 +122,9 @@ define <4 x i32> @test_urem_odd_allones_ne(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_urem_odd_allones_ne: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # 
[3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -134,7 +134,7 @@ define <4 x i32> @test_urem_odd_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_allones_ne:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,4294967295,3435973837]
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993460,858993460,2,858993460]
 ; CHECK-SSE41-NEXT: pmaxud %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -143,7 +143,7 @@ define <4 x i32> @test_urem_odd_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX-LABEL: test_urem_odd_allones_ne:
 ; CHECK-AVX: # %bb.0:
-; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,4294967295,3435973837]
 ; CHECK-AVX-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -159,12 +159,12 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_even_allones_eq:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -178,10 +178,10 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_allones_eq:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -195,10 +195,10 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_allones_eq:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,1,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -211,7 +211,7 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_even_allones_eq:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -222,7 +222,7 @@ define <4 x i32> @test_urem_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_even_allones_eq:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -237,12 +237,12 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_even_allones_ne:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -256,10 +256,10 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_allones_ne:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,1,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -273,10 +273,10 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_allones_ne:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,1,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -289,7 +289,7 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_even_allones_ne:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -300,7 +300,7 @@ define <4 x i32> @test_urem_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_even_allones_ne:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,4294967295,3067833783]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -317,10 +317,10 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_even_allones_eq:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -333,9 +333,9 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_even_allones_eq:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -349,9 +349,9 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_even_allones_eq:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -364,7 +364,7 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_even_allones_eq:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -375,7 +375,7 @@ define <4 x i32> @test_urem_odd_even_allones_eq(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_eq:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -390,10 +390,10 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_even_allones_ne:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -406,9 +406,9 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_even_allones_ne:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -422,9 +422,9 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_even_allones_ne:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -437,7 +437,7 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_even_allones_ne:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -448,7 +448,7 @@ define <4 x i32> @test_urem_odd_even_allones_ne(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_ne:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,4294967295,3264175145]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -467,10 +467,10 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -482,7 +482,7 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
 ; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -496,8 +496,8 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -508,7 +508,7 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -519,7 +519,7 @@ define <4 x i32> @test_urem_odd_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -536,12 +536,12 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_even_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,268435456,2147483648]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -555,10 +555,10 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,268435456,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -572,10 +572,10 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,268435456,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -588,7 +588,7 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_even_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -599,7 +599,7 @@ define <4 x i32> @test_urem_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_even_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -616,11 +616,11 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_even_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,268435456,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -634,10 +634,10 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_even_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,268435456,1073741824]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -651,10 +651,10 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_even_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2147483648,268435456,1073741824]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -667,7 +667,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_even_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -678,7 +678,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_even_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -710,7 +710,7 @@ define <4 x i32> @test_urem_odd_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993459,858993459,4294967295,858993459]
 ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -719,7 +719,7 @@ define <4 x i32> @test_urem_odd_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -769,7 +769,7 @@ define <4 x i32> @test_urem_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: psrld $1, %xmm1
 ; CHECK-SSE41-NEXT: pslld $31, %xmm0
@@ -782,7 +782,7 @@ define <4 x i32> @test_urem_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
 ; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -822,10 +822,10 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_even_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -838,9 +838,9 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_even_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -854,9 +854,9 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_even_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -869,7 +869,7 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_even_one:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -880,7 +880,7 @@ define <4 x i32> @test_urem_odd_even_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_even_one:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,0,3264175145]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -899,10 +899,10 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_INT_MIN:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3435973837,3435973837,3435973837,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -914,7 +914,7 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_INT_MIN:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-SSE41-NEXT: pmovsxbq {{.*#+}} xmm1 = [1,2]
 ; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -928,8 +928,8 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_INT_MIN:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,2,u]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -940,7 +940,7 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_INT_MIN:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -951,7 +951,7 @@ define <4 x i32> @test_urem_odd_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_INT_MIN:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,1,3435973837]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -968,12 +968,12 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_even_INT_MIN:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,3067833783,3067833783,3067833783]
 ; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,2,2147483648]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -987,10 +987,10 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_INT_MIN:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,2147483648,2,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1004,10 +1004,10 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_INT_MIN:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,2147483648,2147483648,2147483648]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,2147483648,2,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1020,7 +1020,7 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_even_INT_MIN:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1031,7 +1031,7 @@ define <4 x i32> @test_urem_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_even_INT_MIN:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,3067833783,1,3067833783]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1048,11 +1048,11 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_even_INT_MIN:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,2,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3067833783,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1066,10 +1066,10 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_even_INT_MIN:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2147483648,2,1073741824]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1083,10 +1083,10 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_even_INT_MIN:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2147483648,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2147483648,2,1073741824]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1099,7 +1099,7 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_even_INT_MIN:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1110,7 +1110,7 @@ define <4 x i32> @test_urem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_even_INT_MIN:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3067833783,1,3264175145]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1129,10 +1129,10 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_allones_and_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3435973837,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3435973837]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; CHECK-SSE2-NEXT: psrlq $32, %xmm0
@@ -1144,7 +1144,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_allones_and_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3435973837]
 ; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456]
 ; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -1158,8 +1158,8 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_allones_and_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3435973837]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1170,7 +1170,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_allones_and_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3435973837]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1181,7 +1181,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_allones_and_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3435973837]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1198,11 +1198,11 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_even_allones_and_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,268435456,2147483648]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3067833783,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1216,10 +1216,10 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_allones_and_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,3067833783]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,268435456,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1233,10 +1233,10 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_allones_and_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,3067833783]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,1,268435456,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1249,7 +1249,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_even_allones_and_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,3067833783]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1260,7 +1260,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_even_allones_and_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,3067833783]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1277,11 +1277,11 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ; CHECK-SSE2-LABEL: test_urem_odd_even_allones_and_poweroftwo:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3264175145]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,268435456,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,1073741824,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1295,10 +1295,10 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_even_allones_and_poweroftwo:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,3264175145]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,1073741824,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,1,268435456,1073741824]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1312,10 +1312,10 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_even_allones_and_poweroftwo:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3264175145]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,1073741824,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,1,268435456,1073741824]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1328,7 +1328,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_even_allones_and_poweroftwo:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3264175145]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1339,7 +1339,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_poweroftwo(<4 x i32> %X) nounwi
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_and_poweroftwo:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,3264175145]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1358,9 +1358,9 @@ define <4 x i32> @test_urem_odd_allones_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_allones_and_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3435973837,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1370,7 +1370,7 @@ define <4 x i32> @test_urem_odd_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_allones_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3435973837]
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993459,1,4294967295,858993459]
 ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
@@ -1379,7 +1379,7 @@ define <4 x i32> @test_urem_odd_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX-LABEL: test_urem_odd_allones_and_one:
 ; CHECK-AVX: # %bb.0:
-; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3435973837]
 ; CHECK-AVX-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -1395,11 +1395,11 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_even_allones_and_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,0,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,1,2147483648]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3067833783,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1413,10 +1413,10 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_allones_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,0,3067833783]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,1,1,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1430,10 +1430,10 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_allones_and_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,0,3067833783]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,1,1,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1446,7 +1446,7 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_even_allones_and_one:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,0,3067833783]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1457,7 +1457,7 @@ define <4 x i32> @test_urem_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_even_allones_and_one:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,0,3067833783]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1474,10 +1474,10 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_even_allones_and_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3264175145]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,u,3264175145,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,1,1073741824,1073741824]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1490,9 +1490,9 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_even_allones_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,0,3264175145]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,1,1073741824,1073741824]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1506,9 +1506,9 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_even_allones_and_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3264175145]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1,1,1073741824,1073741824]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1521,7 +1521,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_even_allones_and_one:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3264175145]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1532,7 +1532,7 @@ define <4 x i32> @test_urem_odd_even_allones_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_even_allones_and_one:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,0,3264175145]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1551,10 +1551,10 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_odd_poweroftwo_and_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3435973837,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1,1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -1567,9 +1567,9 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_odd_poweroftwo_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3435973837]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1,1]
 ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1583,9 +1583,9 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_odd_poweroftwo_and_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1,1]
 ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1598,7 +1598,7 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_odd_poweroftwo_and_one:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1609,7 +1609,7 @@ define <4 x i32> @test_urem_odd_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_odd_poweroftwo_and_one:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3435973837]
 ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -1626,11 +1626,11 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_urem_even_poweroftwo_and_one:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,268435456,1,2147483648]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3067833783,u]
+; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,u,2147483648,u]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
 ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1644,10 +1644,10 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-SSE41-LABEL: test_urem_even_poweroftwo_and_one:
 ; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,1,0,3067833783]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,u,2147483648,u]
+; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,268435456,1,2147483648]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1661,10 +1661,10 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX1-LABEL: test_urem_even_poweroftwo_and_one:
 ; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,u,2147483648,u]
+; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2147483648,268435456,1,2147483648]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
 ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
@@ -1677,7 +1677,7 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX2-LABEL: test_urem_even_poweroftwo_and_one:
 ; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,1,0,3067833783]
 ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -1688,7 +1688,7 @@ define <4 x i32> @test_urem_even_poweroftwo_and_one(<4 x i32> %X) nounwind {
 ;
 ; CHECK-AVX512VL-LABEL: test_urem_even_poweroftwo_and_one:
 ; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 #
[3067833783,1,0,3067833783] ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -1705,10 +1705,10 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_urem_odd_even_poweroftwo_and_one: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1,u,3264175145,u] +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1073741824,1073741824] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -1721,9 +1721,9 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_urem_odd_even_poweroftwo_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,1,0,3264175145] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [268435456,268435456,1073741824,1073741824] ; CHECK-SSE41-NEXT: pxor %xmm2, %xmm2 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1737,9 +1737,9 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_urem_odd_even_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [268435456,268435456,1073741824,1073741824] ; CHECK-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1752,7 +1752,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX2-LABEL: test_urem_odd_even_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145] ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -1763,7 +1763,7 @@ define <4 x i32> @test_urem_odd_even_poweroftwo_and_one(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_urem_odd_even_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; 
CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,1,0,3264175145] ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -1781,10 +1781,10 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; CHECK-SSE2-LABEL: test_urem_odd_allones_and_poweroftwo_and_one: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,0,0,0] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,0] +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,268435456,u] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; CHECK-SSE2-NEXT: psrlq $32, %xmm0 @@ -1796,7 +1796,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-SSE41-LABEL: test_urem_odd_allones_and_poweroftwo_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,4294967295,1,0] ; CHECK-SSE41-NEXT: pmovsxdq {{.*#+}} xmm1 = [1,268435456] ; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] @@ -1810,8 +1810,8 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-AVX1-LABEL: test_urem_odd_allones_and_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,0] +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [1,u,268435456,u] ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -1822,7 +1822,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-AVX2-LABEL: test_urem_odd_allones_and_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,0] ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -1833,7 +1833,7 @@ define <4 x i32> @test_urem_odd_allones_and_poweroftwo_and_one(<4 x i32> %X) nou ; ; CHECK-AVX512VL-LABEL: test_urem_odd_allones_and_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,4294967295,1,0] ; CHECK-AVX512VL-NEXT: vprorvd 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -1849,10 +1849,10 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; CHECK-SSE2-LABEL: test_urem_even_allones_and_poweroftwo_and_one: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4294967295,0,0,0] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,0] +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2147483648,u,268435456,u] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; CHECK-SSE2-NEXT: psrlq $32, %xmm0 @@ -1864,7 +1864,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-SSE41-LABEL: test_urem_even_allones_and_poweroftwo_and_one: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3067833783,4294967295,1,0] ; CHECK-SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = [2147483648,268435456] ; CHECK-SSE41-NEXT: pmuludq %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] @@ -1878,8 +1878,8 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-AVX1-LABEL: test_urem_even_allones_and_poweroftwo_and_one: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,0] +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [2147483648,u,268435456,u] ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -1890,7 +1890,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-AVX2-LABEL: test_urem_even_allones_and_poweroftwo_and_one: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,0] ; CHECK-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -1901,7 +1901,7 @@ define <4 x i32> @test_urem_even_allones_and_poweroftwo_and_one(<4 x i32> %X) no ; ; CHECK-AVX512VL-LABEL: test_urem_even_allones_and_poweroftwo_and_one: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3067833783,4294967295,1,0] ; CHECK-AVX512VL-NEXT: vprorvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 diff 
--git a/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll index 6a36cd2..8042103 100644 --- a/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll +++ b/llvm/test/CodeGen/X86/urem-seteq-vec-nonzero.ll @@ -25,7 +25,7 @@ define <4 x i1> @t32_3(<4 x i32> %X) nounwind { ; CHECK-SSE41-LABEL: t32_3: ; CHECK-SSE41: # %bb.0: ; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,1431655764,1431655764,1431655764] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -34,7 +34,7 @@ define <4 x i1> @t32_3(<4 x i32> %X) nounwind { ; CHECK-AVX1-LABEL: t32_3: ; CHECK-AVX1: # %bb.0: ; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq @@ -80,7 +80,7 @@ define <4 x i1> @t32_5(<4 x i32> %X) nounwind { ; CHECK-SSE41-LABEL: t32_5: ; CHECK-SSE41: # %bb.0: ; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3435973837,3435973837,3435973837,3435973837] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,858993458,858993458] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -89,7 +89,7 @@ define <4 x i1> @t32_5(<4 x i32> %X) nounwind { ; CHECK-AVX1-LABEL: t32_5: ; CHECK-AVX1: # %bb.0: ; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3435973837,3435973837,3435973837,3435973837] ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: retq @@ -140,7 +140,7 @@ define <4 x i1> @t32_6_part0(<4 x i32> %X) nounwind { ; CHECK-SSE41-LABEL: t32_6_part0: ; CHECK-SSE41: # %bb.0: ; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE41-NEXT: psrld $1, %xmm1 ; CHECK-SSE41-NEXT: pslld $31, %xmm0 @@ -153,7 +153,7 @@ define <4 x i1> @t32_6_part0(<4 x i32> %X) nounwind { ; CHECK-AVX1-LABEL: t32_6_part0: ; CHECK-AVX1: # %bb.0: ; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -211,7 +211,7 @@ define <4 x i1> @t32_6_part1(<4 x i32> %X) nounwind { ; CHECK-SSE41-LABEL: t32_6_part1: ; CHECK-SSE41: # %bb.0: ; CHECK-SSE41-NEXT: psubd 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE41-NEXT: psrld $1, %xmm1 ; CHECK-SSE41-NEXT: pslld $31, %xmm0 @@ -224,7 +224,7 @@ define <4 x i1> @t32_6_part1(<4 x i32> %X) nounwind { ; CHECK-AVX1-LABEL: t32_6_part1: ; CHECK-AVX1: # %bb.0: ; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-AVX1-NEXT: vpsrld $1, %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpslld $31, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -275,7 +275,7 @@ define <4 x i1> @t32_tautological(<4 x i32> %X) nounwind { ; CHECK-SSE41-LABEL: t32_tautological: ; CHECK-SSE41: # %bb.0: ; CHECK-SSE41-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4294967295,4294967295,4294967295,1431655764] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -286,7 +286,7 @@ define <4 x i1> @t32_tautological(<4 x i32> %X) nounwind { ; CHECK-AVX1-LABEL: t32_tautological: ; CHECK-AVX1: # %bb.0: ; CHECK-AVX1-NEXT: vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll index 2166e43..b490c3c 100644 --- a/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll +++ b/llvm/test/CodeGen/X86/urem-seteq-vec-splat.ll @@ -23,7 +23,7 @@ define <4 x i32> @test_urem_odd_25(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_urem_odd_25: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798691,171798691,171798691,171798691] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -32,7 +32,7 @@ define <4 x i32> @test_urem_odd_25(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_urem_odd_25: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0 @@ -83,7 +83,7 @@ define <4 x i32> @test_urem_even_100(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_urem_even_100: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; 
CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE41-NEXT: psrld $2, %xmm1 ; CHECK-SSE41-NEXT: pslld $30, %xmm0 @@ -96,7 +96,7 @@ define <4 x i32> @test_urem_even_100(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_urem_even_100: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -139,9 +139,9 @@ define <4 x i32> @test_urem_odd_neg25(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_urem_odd_neg25: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,1030792151,1030792151,3264175145] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [1030792151,u,3264175145,u] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -151,7 +151,7 @@ define <4 x i32> @test_urem_odd_neg25(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_urem_odd_neg25: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [3264175145,1030792151,1030792151,3264175145] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798691,1,1,171798691] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -160,7 +160,7 @@ define <4 x i32> @test_urem_odd_neg25(<4 x i32> %X) nounwind { ; ; CHECK-AVX-LABEL: test_urem_odd_neg25: ; CHECK-AVX: # %bb.0: -; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [3264175145,1030792151,1030792151,3264175145] ; CHECK-AVX-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX-NEXT: vpsrld $31, %xmm0, %xmm0 @@ -176,9 +176,9 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: test_urem_even_neg100: ; CHECK-SSE2: # %bb.0: ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4252017623,3264175145,4252017623,3264175145] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3264175145,3264175145,3264175145,3264175145] ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1 @@ -192,7 +192,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: test_urem_even_neg100: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4252017623,3264175145,4252017623,3264175145] ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE41-NEXT: psrld 
$2, %xmm1 ; CHECK-SSE41-NEXT: pslld $30, %xmm0 @@ -205,7 +205,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: test_urem_even_neg100: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4252017623,3264175145,4252017623,3264175145] ; CHECK-AVX1-NEXT: vpsrld $2, %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpslld $30, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -216,7 +216,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind { ; ; CHECK-AVX2-LABEL: test_urem_even_neg100: ; CHECK-AVX2: # %bb.0: -; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4252017623,3264175145,4252017623,3264175145] ; CHECK-AVX2-NEXT: vpsrld $2, %xmm0, %xmm1 ; CHECK-AVX2-NEXT: vpslld $30, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 @@ -227,7 +227,7 @@ define <4 x i32> @test_urem_even_neg100(<4 x i32> %X) nounwind { ; ; CHECK-AVX512VL-LABEL: test_urem_even_neg100: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4252017623,3264175145,4252017623,3264175145] ; CHECK-AVX512VL-NEXT: vprord $2, %xmm0, %xmm0 ; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -277,7 +277,7 @@ define <4 x i32> @test_urem_odd_undef1(<4 x i32> %X) nounwind { ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: psrld $3, %xmm2 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [25,25,25,25] ; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0 ; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -293,7 +293,7 @@ define <4 x i32> @test_urem_odd_undef1(<4 x i32> %X) nounwind { ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpsrld $3, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [25,25,25,25] ; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 @@ -372,7 +372,7 @@ define <4 x i32> @test_urem_even_undef1(<4 x i32> %X) nounwind { ; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-SSE41-NEXT: psrld $5, %xmm2 -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [100,100,100,100] ; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0 ; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -388,7 +388,7 @@ define <4 x i32> @test_urem_even_undef1(<4 x i32> %X) nounwind { ; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; CHECK-AVX1-NEXT: vpsrld $5, %xmm1, %xmm1 -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX1-NEXT: vpmulld 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [100,100,100,100] ; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll index 84856aa..e5b19a5 100644 --- a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll +++ b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll @@ -25,7 +25,7 @@ define <4 x i1> @t0_all_tautological(<4 x i32> %X) nounwind { define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: t1_all_odd_eq: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -33,7 +33,7 @@ define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: t1_all_odd_eq: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -43,7 +43,7 @@ define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: t1_all_odd_eq: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 @@ -76,7 +76,7 @@ define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind { define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind { ; CHECK-SSE2-LABEL: t1_all_odd_ne: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -84,7 +84,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind { ; ; CHECK-SSE41-LABEL: t1_all_odd_ne: ; CHECK-SSE41: # %bb.0: -; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295] ; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1 ; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0 @@ -95,7 +95,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind { ; ; CHECK-AVX1-LABEL: t1_all_odd_ne: ; CHECK-AVX1: # %bb.0: -; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311531,2863311531,2863311531,2863311531] ; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 
@@ -187,7 +187,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind { ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm3 ; CHECK-SSE2-NEXT: psrlq $32, %xmm3 ; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3 -; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311530,2863311530] ; CHECK-SSE2-NEXT: paddq %xmm3, %xmm0 ; CHECK-SSE2-NEXT: psllq $32, %xmm0 ; CHECK-SSE2-NEXT: paddq %xmm2, %xmm0 @@ -212,7 +212,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind { ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm3 ; CHECK-SSE41-NEXT: psrlq $32, %xmm3 ; CHECK-SSE41-NEXT: pmuludq %xmm1, %xmm3 -; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2863311530,2863311530] ; CHECK-SSE41-NEXT: paddq %xmm3, %xmm0 ; CHECK-SSE41-NEXT: psllq $32, %xmm0 ; CHECK-SSE41-NEXT: paddq %xmm2, %xmm0 @@ -236,7 +236,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind { ; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 ; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3 ; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311530,2863311530] ; CHECK-AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0 ; CHECK-AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0 @@ -255,7 +255,7 @@ define <2 x i1> @t3_wide(<2 x i64> %X) nounwind { ; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 ; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3 ; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; CHECK-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2863311530,2863311530] ; CHECK-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0 ; CHECK-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0 diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll index 6174011..fce8795 100644 --- a/llvm/test/CodeGen/X86/var-permute-128.ll +++ b/llvm/test/CodeGen/X86/var-permute-128.ll @@ -5,9 +5,9 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,XOP ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512,AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512,AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX512,AVX512BW ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL,AVX512VLBW ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+avx512vbmi | FileCheck %s --check-prefixes=AVX,AVX512VL,VLVBMI @@ -241,7 
+241,7 @@ define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind { ; ; SSE41-LABEL: var_shuffle_v4i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036] ; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: pshufb %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -319,7 +319,7 @@ define <4 x i32> @var_shuffle_zero_v4i32(<4 x i32> %v, <4 x i32> %indices) nounw ; SSE41-NEXT: pmaxud %xmm1, %xmm2 ; SSE41-NEXT: pcmpeqd %xmm1, %xmm2 ; SSE41-NEXT: por %xmm2, %xmm1 -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036] ; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: por %xmm2, %xmm1 ; SSE41-NEXT: pshufb %xmm1, %xmm0 @@ -598,6 +598,33 @@ define <8 x i16> @var_shuffle_zero_v8i16(<8 x i16> %v, <8 x i16> %indices) nounw ; AVX2-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; +; AVX512F-LABEL: var_shuffle_zero_v8i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmaxuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2 +; AVX512F-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm2 +; AVX512F-NEXT: vpor %xmm1, %xmm2, %xmm1 +; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [514,514,514,514,514,514,514,514] +; AVX512F-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX512F-NEXT: vpor %xmm2, %xmm1, %xmm1 +; AVX512F-NEXT: vpshufb %xmm1, %xmm0, %xmm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: var_shuffle_zero_v8i16: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: vpbroadcastw {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7] +; AVX512BW-NEXT: vpcmpnleuw %zmm2, %zmm1, %k1 +; AVX512BW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm1 {%k1} +; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [514,514,514,514,514,514,514,514] +; AVX512BW-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX512BW-NEXT: vpshufb %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k1} +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq +; ; AVX512VL-LABEL: var_shuffle_zero_v8i16: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpcmpnleuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1 @@ -921,6 +948,28 @@ define <16 x i8> @var_shuffle_zero_v16i8(<16 x i8> %v, <16 x i8> %indices) nounw ; AVX2-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; +; AVX512F-LABEL: var_shuffle_zero_v16i8: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2 +; AVX512F-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm2 +; AVX512F-NEXT: vpor %xmm1, %xmm2, %xmm1 +; AVX512F-NEXT: vpshufb %xmm1, %xmm0, %xmm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: var_shuffle_zero_v16i8: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512BW-NEXT: vpbroadcastb {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512BW-NEXT: vpcmpnleub %zmm2, %zmm1, %k1 +; AVX512BW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1} +; AVX512BW-NEXT: vpshufb %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} +; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq +; ; AVX512VL-LABEL: 
var_shuffle_zero_v16i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpcmpnleub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1 @@ -1212,7 +1261,7 @@ define <4 x float> @var_shuffle_v4f32(<4 x float> %v, <4 x i32> %indices) nounwi ; ; SSE41-LABEL: var_shuffle_v4f32: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036] ; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: pshufb %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -1290,7 +1339,7 @@ define <4 x float> @var_shuffle_zero_v4f32(<4 x float> %v, <4 x i32> %indices) n ; SSE41-NEXT: pmaxud %xmm1, %xmm2 ; SSE41-NEXT: pcmpeqd %xmm1, %xmm2 ; SSE41-NEXT: por %xmm2, %xmm1 -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [67372036,67372036,67372036,67372036] ; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: por %xmm2, %xmm1 ; SSE41-NEXT: pshufb %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vec_reassociate.ll b/llvm/test/CodeGen/X86/vec_reassociate.ll index a9473fff..4703ca3 100644 --- a/llvm/test/CodeGen/X86/vec_reassociate.ll +++ b/llvm/test/CodeGen/X86/vec_reassociate.ll @@ -38,13 +38,13 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) { ; X86-LABEL: mul_4i32: ; X86: # %bb.0: ; X86-NEXT: pmulld %xmm1, %xmm0 -; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,6,6,4] ; X86-NEXT: retl ; ; X64-LABEL: mul_4i32: ; X64: # %bb.0: ; X64-NEXT: pmulld %xmm1, %xmm0 -; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,6,6,4] ; X64-NEXT: retq %1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4> %2 = mul <4 x i32> %a1, <i32 4, i32 3, i32 2, i32 1> @@ -56,13 +56,13 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) { ; X86-LABEL: mul_4i32_commute: ; X86: # %bb.0: ; X86-NEXT: pmulld %xmm1, %xmm0 -; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,6,6,4] ; X86-NEXT: retl ; ; X64-LABEL: mul_4i32_commute: ; X64: # %bb.0: ; X64-NEXT: pmulld %xmm1, %xmm0 -; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,6,6,4] ; X64-NEXT: retq %1 = mul <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %a0 %2 = mul <4 x i32> <i32 4, i32 3, i32 2, i32 1>, %a1 diff --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll index 762900e..a0c2760 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll @@ -1821,9 +1821,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [32,u,128,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: por %xmm1, %xmm0 @@ -1841,7 +1841,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; 
SSE41-NEXT: psrld $28, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm2[4,5,6,7] ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE41-NEXT: por %xmm2, %xmm0 ; SSE41-NEXT: retq ; @@ -1854,7 +1854,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; AVX1-NEXT: vpsrld $28, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128] ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -1935,9 +1935,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [32,u,128,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; X86-SSE2-NEXT: por %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll index 445e572..2fadf5f 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll @@ -1647,7 +1647,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind { ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [256,512,1024,2048] ; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsrld $25, %xmm1, %xmm3 ; AVX1-NEXT: vpsrld $27, %xmm1, %xmm4 @@ -1656,7 +1656,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind { ; AVX1-NEXT: vpsrld $28, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128] ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll index d0690bd..ec2efcd 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll @@ -1302,9 +1302,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; SSE2-LABEL: constant_funnnel_v4i32: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; 
SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] @@ -1316,8 +1316,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; SSE41-LABEL: constant_funnnel_v4i32: ; SSE41: # %bb.0: ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u] +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1328,8 +1328,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; AVX1-LABEL: constant_funnnel_v4i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1394,9 +1394,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; X86-SSE2-LABEL: constant_funnnel_v4i32: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,128,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll index 421fa98..5f7e407 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll @@ -1082,13 +1082,13 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind { ; AVX1-LABEL: constant_funnnel_v8i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [512,u,2048,u] ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [256,512,1024,2048] ; AVX1-NEXT: vinsertf128 
$1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7] ; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll index b378dce..304daab 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll @@ -319,9 +319,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; SSE2-LABEL: constant_funnnel_v2i32: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,1,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] @@ -333,8 +333,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; SSE41-LABEL: constant_funnnel_v2i32: ; SSE41: # %bb.0: ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,1,u] +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1] ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -345,8 +345,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; AVX1-LABEL: constant_funnnel_v2i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,1,u] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,1,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -411,9 +411,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; X86-SSE2-LABEL: constant_funnnel_v2i32: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,1,1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,1,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] diff --git a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll index 06ff7e7..ae5dd18 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-sub128.ll @@ -500,9 +500,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind { ; SSE2-NEXT: psrld $27, %xmm2 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = 
xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [32,u,1,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: por %xmm1, %xmm0 @@ -514,7 +514,7 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind { ; SSE41-NEXT: psrld $27, %xmm2 ; SSE41-NEXT: psrld $28, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7] -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,1,1] ; SSE41-NEXT: por %xmm2, %xmm0 ; SSE41-NEXT: retq ; @@ -523,7 +523,7 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind { ; AVX1-NEXT: vpsrld $27, %xmm1, %xmm2 ; AVX1-NEXT: vpsrld $28, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,1,1] ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -598,9 +598,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x, <2 x i32> %y) nounwind { ; X86-SSE2-NEXT: psrld $27, %xmm2 ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,1,1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [32,u,1,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; X86-SSE2-NEXT: por %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll index 9b52857..33a6a76 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll @@ -1741,9 +1741,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,67108864,33554432] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [134217728,u,33554432,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: por %xmm1, %xmm0 @@ -1761,7 +1761,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; SSE41-NEXT: psrld $4, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm2[4,5,6,7] ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 
# [268435456,134217728,67108864,33554432] ; SSE41-NEXT: por %xmm2, %xmm0 ; SSE41-NEXT: retq ; @@ -1774,7 +1774,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; AVX1-NEXT: vpsrld $4, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432] ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -1856,9 +1856,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { ; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [268435456,134217728,67108864,33554432] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [134217728,u,33554432,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; X86-SSE2-NEXT: por %xmm1, %xmm0 @@ -1872,7 +1872,7 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] ; SSE2-NEXT: pandn %xmm1, %xmm2 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,0,0,128,0,64,0,32,0,16,0,8,0,4,0,2] ; SSE2-NEXT: por %xmm1, %xmm2 ; SSE2-NEXT: paddw %xmm0, %xmm0 ; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,16384,8192,4096,2048,1024,512,256] @@ -1964,7 +1964,7 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] ; X86-SSE2-NEXT: pandn %xmm1, %xmm2 -; X86-SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [0,0,0,128,0,64,0,32,0,16,0,8,0,4,0,2] ; X86-SSE2-NEXT: por %xmm1, %xmm2 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 ; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [32768,16384,8192,4096,2048,1024,512,256] diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll index a387562..217431be 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll @@ -1403,7 +1403,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind { ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [16777216,8388608,4194304,2097152] ; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsrld $7, %xmm1, %xmm3 ; AVX1-NEXT: vpsrld $5, %xmm1, %xmm4 @@ -1412,7 +1412,7 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind { ; AVX1-NEXT: vpsrld $4, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7] ; AVX1-NEXT: 
vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432] ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll index 4969cb5..5d01dfd 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll @@ -1380,9 +1380,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; SSE2-LABEL: constant_funnnel_v4i32: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,67108864,33554432] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,33554432,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] @@ -1394,8 +1394,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; SSE41-LABEL: constant_funnnel_v4i32: ; SSE41: # %bb.0: ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,33554432,u] +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,67108864,33554432] ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1406,8 +1406,8 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; AVX1-LABEL: constant_funnnel_v4i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [134217728,u,33554432,u] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1472,9 +1472,9 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind { ; X86-SSE2-LABEL: constant_funnnel_v4i32: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [268435456,134217728,67108864,33554432] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [134217728,u,33554432,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll 
b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll index e2a3e26..4dc931d 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll @@ -1134,13 +1134,13 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind { ; AVX1-LABEL: constant_funnnel_v8i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [134217728,u,33554432,u] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [8388608,u,2097152,u] ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,67108864,33554432] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [16777216,8388608,4194304,2097152] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7] ; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll index ef5ffe4..4b42b18 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll @@ -341,9 +341,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; SSE2-LABEL: constant_funnnel_v2i32: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,1,1] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,1,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] @@ -355,8 +355,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; SSE41-LABEL: constant_funnnel_v2i32: ; SSE41: # %bb.0: ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [134217728,u,1,u] +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [268435456,134217728,1,1] ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -367,8 +367,8 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; AVX1-LABEL: constant_funnnel_v2i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [134217728,u,1,u] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,134217728,1,1] ; AVX1-NEXT: vpshufd 
{{.*#+}} xmm2 = xmm0[1,1,3,3] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -433,9 +433,9 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind { ; X86-SSE2-LABEL: constant_funnnel_v2i32: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [268435456,134217728,1,1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [134217728,u,1,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll index 816d5ca..e68d1d7 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -171,7 +171,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind { ; SSE-NEXT: pxor %xmm1, %xmm1 ; SSE-NEXT: pxor %xmm2, %xmm2 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632] +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; SSE-NEXT: pmulhw %xmm3, %xmm2 ; SSE-NEXT: psrlw $8, %xmm2 ; SSE-NEXT: pxor %xmm4, %xmm4 @@ -193,7 +193,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] @@ -260,11 +260,11 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind { ; SSE-NEXT: pxor %xmm1, %xmm1 ; SSE-NEXT: pxor %xmm2, %xmm2 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632] +; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147] ; SSE-NEXT: psrlw $8, %xmm2 ; SSE-NEXT: pxor %xmm3, %xmm3 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632] +; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147] ; SSE-NEXT: psrlw $8, %xmm3 ; SSE-NEXT: packuswb %xmm2, %xmm3 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -287,10 
+287,10 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 @@ -561,7 +561,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE-NEXT: pxor %xmm1, %xmm1 ; SSE-NEXT: pxor %xmm2, %xmm2 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE-NEXT: movdqa {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632] +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; SSE-NEXT: pmulhw %xmm3, %xmm2 ; SSE-NEXT: psrlw $8, %xmm2 ; SSE-NEXT: pxor %xmm4, %xmm4 @@ -588,7 +588,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37632,37632,37632,37632,37632,37632,37632,37632] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] @@ -667,11 +667,11 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [37632,20224,11008,47872,26368,14592,14592,37632] +; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147] ; SSE2-NEXT: psrlw $8, %xmm1 ; SSE2-NEXT: pxor %xmm3, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632] +; SSE2-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147] ; SSE2-NEXT: psrlw $8, %xmm3 ; SSE2-NEXT: packuswb %xmm1, %xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = 
[255,255,0,0,255,0,0,255,255,0,0,255,0,0,0,255] @@ -706,11 +706,11 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632] +; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147] ; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: pxor %xmm3, %xmm3 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632] +; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147] ; SSE41-NEXT: psrlw $8, %xmm3 ; SSE41-NEXT: packuswb %xmm2, %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,0,255,0,0,255,255,0,0,255,0,0,0,255] @@ -741,10 +741,10 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [37632,20224,11008,47872,26368,14592,14592,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,147,0,79,0,43,0,187,0,103,0,57,0,57,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,33024,14592,26368,47872,11008,20224,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3 @@ -1116,11 +1116,11 @@ define <16 x i8> @PR143238(<16 x i8> %a0) { ; SSE-NEXT: pxor %xmm1, %xmm1 ; SSE-NEXT: pxor %xmm2, %xmm2 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [26368,47872,11008,20224,37632,35072,33024,30976] +; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,103,0,187,0,43,0,79,0,147,0,137,0,129,0,121] ; SSE-NEXT: psrlw $8, %xmm2 ; SSE-NEXT: pxor %xmm3, %xmm3 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [33024,22016,33024,26368,11008,37632,33024,14592] +; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [0,129,0,86,0,129,0,103,0,43,0,147,0,129,0,57] ; SSE-NEXT: psrlw $8, %xmm3 ; SSE-NEXT: packuswb %xmm2, %xmm3 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 @@ -1144,10 +1144,10 @@ define <16 x i8> @PR143238(<16 x i8> %a0) { ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; 
AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [26368,47872,11008,20224,37632,35072,33024,30976] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,103,0,187,0,43,0,79,0,147,0,137,0,129,0,121] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [33024,22016,33024,26368,11008,37632,33024,14592] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,129,0,86,0,129,0,103,0,43,0,147,0,129,0,57] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll index 63c69e5..7355f36 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll @@ -161,7 +161,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37632,37632,37632,37632,37632,37632,37632,37632] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX1-NEXT: vpmulhw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] @@ -198,7 +198,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW: # %bb.0: ; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31] -; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632] +; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX2NOBW-NEXT: vpmulhw %ymm3, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] @@ -245,10 +245,10 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] -; AVX1-NEXT: vpmulhw 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,20224,11008,47872,26368,14592,33024,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [47872,12544,26368,6912,14592,30976,33024,35072] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3 @@ -266,10 +266,10 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpcmpgtb %xmm2, %xmm1, %xmm4 ; AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [35072,33024,30976,14592,6912,26368,12544,47872] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187] ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [37632,33024,14592,26368,47872,11008,20224,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0 ; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0 @@ -291,10 +291,10 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW: # %bb.0: ; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31] -; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [35072,33024,30976,14592,6912,26368,12544,47872,37632,20224,11008,47872,26368,14592,33024,37632] +; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] -; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37632,33024,14592,26368,47872,11008,20224,37632,47872,12544,26368,6912,14592,30976,33024,35072] +; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2 ; AVX2NOBW-NEXT: 
vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 @@ -539,7 +539,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37632,37632,37632,37632,37632,37632,37632,37632] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX1-NEXT: vpmulhw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] @@ -585,7 +585,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW: # %bb.0: ; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31] -; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632] +; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX2NOBW-NEXT: vpmulhw %ymm3, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] @@ -640,10 +640,10 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37632,20224,11008,47872,26368,14592,33024,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [47872,12544,26368,6912,14592,30976,33024,35072] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm4 @@ -668,10 +668,10 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3 ; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), 
%xmm3, %xmm3 # [35072,33024,30976,14592,6912,26368,12544,47872] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [37632,33024,14592,26368,47872,11008,20224,37632] +; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147] ; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5 ; AVX1-NEXT: vpackuswb %xmm3, %xmm5, %xmm3 ; AVX1-NEXT: vpaddb %xmm4, %xmm3, %xmm3 @@ -699,10 +699,10 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW: # %bb.0: ; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31] -; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [35072,33024,30976,14592,6912,26368,12544,47872,37632,20224,11008,47872,26368,14592,33024,37632] +; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] -; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37632,33024,14592,26368,47872,11008,20224,37632,47872,12544,26368,6912,14592,30976,33024,35072] +; AVX2NOBW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll index 6bc4fcb..5445330 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll @@ -132,7 +132,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31] -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632] +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX512F-NEXT: vpmulhw %ymm4, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = 
ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23] @@ -169,7 +169,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind { ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632] +; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX512BW-NEXT: vpmulhw %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] @@ -199,10 +199,10 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # 
[6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512F-NEXT: vpackuswb %ymm3, %ymm2, %ymm2 ; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3 @@ -220,10 +220,10 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm1, %ymm4 ; AVX512F-NEXT: vpsubb %ymm4, %ymm2, %ymm2 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27] ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137] ; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512F-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0 @@ -245,10 +245,10 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind { ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912,35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632] +; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27,0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = 
zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] -; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072,6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072] +; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137,0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1 ; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 @@ -444,7 +444,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31] -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632] +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX512F-NEXT: vpmulhw %ymm4, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23] @@ -490,7 +490,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632,37632] +; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = 
[0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147,0,147] ; AVX512BW-NEXT: vpmulhw %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] @@ -524,10 +524,10 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 ; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm4 @@ -552,10 +552,10 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpor %ymm3, %ymm5, %ymm3 ; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm2 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27] ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = 
ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23] -; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072] +; AVX512F-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137] ; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5 ; AVX512F-NEXT: vpackuswb %ymm3, %ymm5, %ymm3 ; AVX512F-NEXT: vpaddb %ymm4, %ymm3, %ymm3 @@ -583,10 +583,10 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [35072,33024,30976,14592,6912,26368,12544,47872,34048,33024,32000,30976,15104,14592,28416,6912,35072,18176,37632,4864,20224,10496,11008,45824,37632,20224,11008,47872,26368,14592,33024,37632] +; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [0,137,0,129,0,121,0,57,0,27,0,103,0,49,0,187,0,133,0,129,0,125,0,121,0,59,0,57,0,111,0,27,0,137,0,71,0,147,0,19,0,79,0,41,0,43,0,179,0,147,0,79,0,43,0,187,0,103,0,57,0,129,0,147] ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] -; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [37632,33024,14592,26368,47872,11008,20224,37632,45824,11008,10496,20224,4864,37632,18176,35072,6912,28416,14592,15104,30976,32000,33024,34048,47872,12544,26368,6912,14592,30976,33024,35072] +; AVX512BW-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [0,147,0,129,0,57,0,103,0,187,0,43,0,79,0,147,0,179,0,43,0,41,0,79,0,19,0,147,0,71,0,137,0,27,0,111,0,57,0,59,0,121,0,125,0,129,0,133,0,187,0,49,0,103,0,27,0,57,0,121,0,129,0,137] ; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1 ; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index 33d80f6..6cd5098 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ 
b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -169,7 +169,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-NEXT: pxor %xmm1, %xmm1
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm4
@@ -209,7 +209,7 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -270,22 +270,22 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,256,256,256,256,256,256,256]
 ; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,79,171,117,205,57,57,37]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
 ; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,256,256,256,256,256,256,128]
 ; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,32,57,205,117,171,79,147]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; SSE2-NEXT: psrlw $8, %xmm3
 ; SSE2-NEXT: packuswb %xmm2, %xmm3
 ; SSE2-NEXT: psubb %xmm3, %xmm0
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,128,0,0,0,128]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,0,0,0,128,0,0,0]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; SSE2-NEXT: psrlw $8, %xmm0
 ; SSE2-NEXT: packuswb %xmm2, %xmm0
 ; SSE2-NEXT: paddb %xmm3, %xmm0
@@ -309,7 +309,7 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
 ; SSE41-NEXT: psllw $7, %xmm3
 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4,5,6,7]
 ; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,79,171,117,205,57,57,37]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
 ; SSE41-NEXT: psrlw $8, %xmm3
 ; SSE41-NEXT: pxor %xmm2, %xmm2
 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
@@ -317,15 +317,15 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
 ; SSE41-NEXT: psllw $7, %xmm4
 ; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,6],xmm4[7]
 ; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,32,57,205,117,171,79,147]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; SSE41-NEXT: psrlw $8, %xmm4
 ; SSE41-NEXT: packuswb %xmm3, %xmm4
 ; SSE41-NEXT: psubb %xmm4, %xmm0
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,128,0,0,0,128]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,128,0,0,0]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; SSE41-NEXT: psrlw $8, %xmm2
 ; SSE41-NEXT: packuswb %xmm0, %xmm2
 ; SSE41-NEXT: paddb %xmm4, %xmm2
@@ -346,22 +346,22 @@ define <16 x i8> @test_divconstant_16i8(<16 x i8> %a) nounwind {
 ; AVX1-NEXT: vpsllw $7, %xmm3, %xmm3
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4,5,6,7]
 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,79,171,117,205,57,57,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm4[7]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
 ; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -638,7 +638,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-NEXT: pxor %xmm1, %xmm1
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; SSE2-NEXT: pmullw %xmm3, %xmm2
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm4
@@ -690,7 +690,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -763,23 +763,23 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,256,256,256,256,256,256,256]
 ; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,79,171,117,205,57,57,37]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
 ; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,256,256,256,256,256,256,128]
 ; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,32,57,205,117,171,79,147]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; SSE2-NEXT: psrlw $8, %xmm3
 ; SSE2-NEXT: packuswb %xmm2, %xmm3
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: psubb %xmm3, %xmm2
 ; SSE2-NEXT: movdqa %xmm2, %xmm4
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [0,0,0,128,0,0,0,128]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; SSE2-NEXT: psrlw $8, %xmm4
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,128,0,0,0]
+; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: packuswb %xmm4, %xmm2
 ; SSE2-NEXT: paddb %xmm3, %xmm2
@@ -809,7 +809,7 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
 ; SSE41-NEXT: psllw $7, %xmm3
 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4,5,6,7]
 ; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,79,171,117,205,57,57,37]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
 ; SSE41-NEXT: psrlw $8, %xmm3
 ; SSE41-NEXT: pxor %xmm2, %xmm2
 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
@@ -817,16 +817,16 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
 ; SSE41-NEXT: psllw $7, %xmm4
 ; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,5,6],xmm4[7]
 ; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,32,57,205,117,171,79,147]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; SSE41-NEXT: psrlw $8, %xmm4
 ; SSE41-NEXT: packuswb %xmm3, %xmm4
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psubb %xmm4, %xmm2
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,128,0,0,0,128]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; SSE41-NEXT: psrlw $8, %xmm2
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [128,0,0,0,128,0,0,0]
+; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; SSE41-NEXT: psrlw $8, %xmm3
 ; SSE41-NEXT: packuswb %xmm2, %xmm3
 ; SSE41-NEXT: paddb %xmm4, %xmm3
@@ -854,22 +854,22 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
 ; AVX1-NEXT: vpsllw $7, %xmm3, %xmm3
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4,5,6,7]
 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,79,171,117,205,57,57,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [147,0,79,0,171,0,117,0,205,0,57,0,57,0,37,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm4[7]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
index e43108f..98ea87c 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -166,7 +166,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -200,7 +200,7 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
 ; AVX2NOBW: # %bb.0:
 ; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
@@ -246,22 +246,22 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
 ; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3,4,5,6,7]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,79,171,117,205,57,32,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; AVX1-NEXT: vpsllw $7, %xmm5, %xmm5
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2,3,4,5,6,7]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,135,205,27,57,241,16,137]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,128,0,0,0,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
 ; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -276,22 +276,22 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
 ; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm4
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm4[7]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [137,16,241,57,27,205,135,187]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX1-NEXT: vpsllw $7, %xmm5, %xmm5
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,128,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
 ; AVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
@@ -312,20 +312,20 @@ define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
 ; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [256,256,256,256,256,256,256,128,128,256,256,256,256,256,256,256]
 ; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,16,241,57,27,205,135,187,147,79,171,117,205,57,32,37]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm4, %ymm3
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,32,57,205,117,171,79,147,187,135,205,27,57,241,16,137]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
 ; AVX2NOBW-NEXT: vpsubb %ymm2, %ymm0, %ymm0
 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,0,0,0,0,0,128,0,0,0,0,128,0,0,0,128]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,128,0,0,0,0,128,0,0,0,0,0,0]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm0, %ymm0
 ; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
 ; AVX2NOBW-NEXT: vpaddb %ymm2, %ymm0, %ymm0
@@ -578,7 +578,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37,37,37,37,37,37,37,37]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
@@ -622,7 +622,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
 ; AVX2NOBW: # %bb.0:
 ; AVX2NOBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2NOBW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
@@ -676,22 +676,22 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
 ; AVX1-NEXT: vpsllw $7, %xmm4, %xmm4
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3,4,5,6,7]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,79,171,117,205,57,32,37]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; AVX1-NEXT: vpsllw $7, %xmm5, %xmm5
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2,3,4,5,6,7]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,135,205,27,57,241,16,137]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm4
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [0,0,0,128,0,0,0,128]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,128,0,0,0,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
 ; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
@@ -713,22 +713,22 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
 ; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm5
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [137,16,241,57,27,205,135,187]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX1-NEXT: vpsllw $7, %xmm6, %xmm6
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6],xmm6[7]
 ; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [37,32,57,205,117,171,79,147]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
 ; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
 ; AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm5
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [0,0,0,0,0,0,128,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [128,0,0,0,128,0,0,0]
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0]
 ; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
 ; AVX1-NEXT: vpackuswb %xmm6, %xmm5, %xmm5
 ; AVX1-NEXT: vpaddb %xmm4, %xmm5, %xmm4
@@ -755,20 +755,20 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
 ; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [256,256,256,256,256,256,256,128,128,256,256,256,256,256,256,256]
 ; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,16,241,57,27,205,135,187,147,79,171,117,205,57,32,37]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm4, %ymm3
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,32,57,205,117,171,79,147,187,135,205,27,57,241,16,137]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
 ; AVX2NOBW-NEXT: vpsubb %ymm2, %ymm0, %ymm3
 ; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15],ymm3[24],ymm1[24],ymm3[25],ymm1[25],ymm3[26],ymm1[26],ymm3[27],ymm1[27],ymm3[28],ymm1[28],ymm3[29],ymm1[29],ymm3[30],ymm1[30],ymm3[31],ymm1[31]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,128,0,0,0,0,128,0,0,0,128]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[16],ymm1[16],ymm3[17],ymm1[17],ymm3[18],ymm1[18],ymm3[19],ymm1[19],ymm3[20],ymm1[20],ymm3[21],ymm1[21],ymm3[22],ymm1[22],ymm3[23],ymm1[23]
-; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [128,0,0,0,128,0,0,0,0,128,0,0,0,0,0,0]
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX2NOBW-NEXT: vpackuswb %ymm4, %ymm3, %ymm3
 ; AVX2NOBW-NEXT: vpaddb %ymm2, %ymm3, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
index bf98bcc..a11fa370 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
@@ -135,7 +135,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
@@ -169,7 +169,7 @@ define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
-; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX512BW-NEXT: vpmullw %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
@@ -199,20 +199,20 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [256,256,64,256,256,256,256,256,128,256,256,256,256,256,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,256,128,256,256,256,256,256,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
 ; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
 ; AVX512F-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
 ; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2
@@ -226,20 +226,20 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [256,256,256,256,256,256,256,128,256,256,256,256,256,256,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,128,256,256,256,256,256,64,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
 ; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm0
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
 ; AVX512F-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
 ; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
@@ -259,20 +259,20 @@ define <64 x i8> @test_divconstant_64i8(<64 x i8> %a) nounwind {
 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
 ; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27,137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0,137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
 ; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
 ; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137,27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0,27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
 ; AVX512BW-NEXT: vpackuswb %zmm2, %zmm3, %zmm2
 ; AVX512BW-NEXT: vpsubb %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0,0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0,0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
 ; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0
 ; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
@@ -473,7 +473,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
-; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
@@ -517,7 +517,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
-; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
 ; AVX512BW-NEXT: vpmullw %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
@@ -551,20 +551,20 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [256,256,64,256,256,256,256,256,128,256,256,256,256,256,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,256,128,256,256,256,256,256,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
 ; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm4
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15],ymm4[24],ymm1[24],ymm4[25],ymm1[25],ymm4[26],ymm1[26],ymm4[27],ymm1[27],ymm4[28],ymm1[28],ymm4[29],ymm1[29],ymm4[30],ymm1[30],ymm4[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[16],ymm1[16],ymm4[17],ymm1[17],ymm4[18],ymm1[18],ymm4[19],ymm1[19],ymm4[20],ymm1[20],ymm4[21],ymm1[21],ymm4[22],ymm1[22],ymm4[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX512F-NEXT: vpackuswb %ymm5, %ymm4, %ymm4
 ; AVX512F-NEXT: vpaddb %ymm3, %ymm4, %ymm3
@@ -585,20 +585,20 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [256,256,256,256,256,256,256,128,256,256,256,256,256,256,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [256,256,256,256,256,256,256,128,256,256,256,256,256,64,256,256]
 ; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
 ; AVX512F-NEXT: vpackuswb %ymm4, %ymm5, %ymm4
 ; AVX512F-NEXT: vpsubb %ymm4, %ymm0, %ymm5
 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm5[8],ymm1[8],ymm5[9],ymm1[9],ymm5[10],ymm1[10],ymm5[11],ymm1[11],ymm5[12],ymm1[12],ymm5[13],ymm1[13],ymm5[14],ymm1[14],ymm5[15],ymm1[15],ymm5[24],ymm1[24],ymm5[25],ymm1[25],ymm5[26],ymm1[26],ymm5[27],ymm1[27],ymm5[28],ymm1[28],ymm5[29],ymm1[29],ymm5[30],ymm1[30],ymm5[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm6, %ymm6
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm1[0],ymm5[1],ymm1[1],ymm5[2],ymm1[2],ymm5[3],ymm1[3],ymm5[4],ymm1[4],ymm5[5],ymm1[5],ymm5[6],ymm1[6],ymm5[7],ymm1[7],ymm5[16],ymm1[16],ymm5[17],ymm1[17],ymm5[18],ymm1[18],ymm5[19],ymm1[19],ymm5[20],ymm1[20],ymm5[21],ymm1[21],ymm5[22],ymm1[22],ymm5[23],ymm1[23]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0]
+; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0]
 ; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
 ; AVX512F-NEXT: vpackuswb %ymm6, %ymm5, %ymm5
 ; AVX512F-NEXT: vpaddb %ymm4, %ymm5, %ymm4
@@ -624,20 +624,20 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind {
 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
 ; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,16,241,57,27,205,135,187,9,8,249,241,235,57,111,27,137,27,37,19,79,41,171,101,147,79,171,117,205,57,32,37]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 # [137,0,16,0,241,0,57,0,27,0,205,0,135,0,187,0,9,0,8,0,249,0,241,0,235,0,57,0,111,0,27,0,137,0,27,0,37,0,19,0,79,0,41,0,171,0,101,0,147,0,79,0,171,0,117,0,205,0,57,0,32,0,37,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
 ; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
 ; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,32,57,205,117,171,79,147,101,171,41,79,19,37,27,137,27,111,57,235,241,249,8,9,187,135,205,27,57,241,16,137]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [37,0,32,0,57,0,205,0,117,0,171,0,79,0,147,0,101,0,171,0,41,0,79,0,19,0,37,0,27,0,137,0,27,0,111,0,57,0,235,0,241,0,249,0,8,0,9,0,187,0,135,0,205,0,27,0,57,0,241,0,16,0,137,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
 ; AVX512BW-NEXT: vpackuswb %zmm2, %zmm3, %zmm2
 ; AVX512BW-NEXT: vpsubb %zmm2, %zmm0, %zmm3
 ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8],zmm1[8],zmm3[9],zmm1[9],zmm3[10],zmm1[10],zmm3[11],zmm1[11],zmm3[12],zmm1[12],zmm3[13],zmm1[13],zmm3[14],zmm1[14],zmm3[15],zmm1[15],zmm3[24],zmm1[24],zmm3[25],zmm1[25],zmm3[26],zmm1[26],zmm3[27],zmm1[27],zmm3[28],zmm1[28],zmm3[29],zmm1[29],zmm3[30],zmm1[30],zmm3[31],zmm1[31],zmm3[40],zmm1[40],zmm3[41],zmm1[41],zmm3[42],zmm1[42],zmm3[43],zmm1[43],zmm3[44],zmm1[44],zmm3[45],zmm1[45],zmm3[46],zmm1[46],zmm3[47],zmm1[47],zmm3[56],zmm1[56],zmm3[57],zmm1[57],zmm3[58],zmm1[58],zmm3[59],zmm1[59],zmm3[60],zmm1[60],zmm3[61],zmm1[61],zmm3[62],zmm1[62],zmm3[63],zmm1[63]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm4 # [0,0,0,0,0,0,128,0,128,0,0,0,0,0,0,0,0,128,0,0,0,0,0,128,0,0,0,128,0,0,0,128]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm4 # [0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm4, %zmm4
 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm1[0],zmm3[1],zmm1[1],zmm3[2],zmm1[2],zmm3[3],zmm1[3],zmm3[4],zmm1[4],zmm3[5],zmm1[5],zmm3[6],zmm1[6],zmm3[7],zmm1[7],zmm3[16],zmm1[16],zmm3[17],zmm1[17],zmm3[18],zmm1[18],zmm3[19],zmm1[19],zmm3[20],zmm1[20],zmm3[21],zmm1[21],zmm3[22],zmm1[22],zmm3[23],zmm1[23],zmm3[32],zmm1[32],zmm3[33],zmm1[33],zmm3[34],zmm1[34],zmm3[35],zmm1[35],zmm3[36],zmm1[36],zmm3[37],zmm1[37],zmm3[38],zmm1[38],zmm3[39],zmm1[39],zmm3[48],zmm1[48],zmm3[49],zmm1[49],zmm3[50],zmm1[50],zmm3[51],zmm1[51],zmm3[52],zmm1[52],zmm3[53],zmm1[53],zmm3[54],zmm1[54],zmm3[55],zmm1[55]
-; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [128,0,0,0,128,0,0,0,128,0,0,0,0,0,128,0,0,0,0,0,0,0,0,128,0,128,0,0,0,0,0,0]
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 # [128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
 ; AVX512BW-NEXT: vpackuswb %zmm4, %zmm3, %zmm3
 ; AVX512BW-NEXT: vpaddb %zmm2, %zmm3, %zmm2
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 6e1bf25..d0bb90c 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -130,31 +130,31 @@ define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
 ; X86-SSE2-LABEL: mul_v4i32_1_2_4_8:
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8]
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [2,u,8,u]
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE4-LABEL: mul_v4i32_1_2_4_8:
 ; X86-SSE4: # %bb.0:
-; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8]
 ; X86-SSE4-NEXT: retl
 ;
 ; X64-SSE2-LABEL: mul_v4i32_1_2_4_8:
 ; X64-SSE2: # %bb.0:
 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8]
 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2,u,8,u]
 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE4-LABEL: mul_v4i32_1_2_4_8:
 ; X64-SSE4: # %bb.0:
-; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8]
 ; X64-SSE4-NEXT: retq
 ;
 ; X64-XOP-LABEL: mul_v4i32_1_2_4_8:
@@ -190,12 +190,12 @@ define <4 x i32> @mul_v4i32_1_2_4_8_optsize(<4 x i32> %a0) nounwind optsize {
 ;
 ; X86-SSE4-LABEL: mul_v4i32_1_2_4_8_optsize:
 ; X86-SSE4: # %bb.0:
-; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8]
 ; X86-SSE4-NEXT: retl
 ;
 ; X64-SSE4-LABEL: mul_v4i32_1_2_4_8_optsize:
 ; X64-SSE4: # %bb.0:
-; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8]
 ; X64-SSE4-NEXT: retq
 ;
 ; X64-XOP-LABEL: mul_v4i32_1_2_4_8_optsize:
@@ -989,7 +989,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_17_65:
 ; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [17,65]
 ; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 17, i64 65>
 ret <2 x i64> %1
@@ -999,36 +999,36 @@ define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
 ; X86-SSE2-LABEL: mul_v4i32_5_17_33_65:
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [5,17,33,65]
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [17,u,65,u]
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-SSE4-LABEL: mul_v4i32_5_17_33_65:
 ; X86-SSE4: # %bb.0:
-; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [5,17,33,65]
 ; X86-SSE4-NEXT: retl
 ;
 ; X64-SSE2-LABEL: mul_v4i32_5_17_33_65:
 ; X64-SSE2: # %bb.0:
 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [5,17,33,65]
 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [17,u,65,u]
 ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X64-SSE2-NEXT: retq
 ;
 ; X64-SSE4-LABEL: mul_v4i32_5_17_33_65:
 ; X64-SSE4: # %bb.0:
-; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [5,17,33,65]
 ; X64-SSE4-NEXT: retq
 ;
 ; X64-AVX-LABEL: mul_v4i32_5_17_33_65:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [5,17,33,65]
 ; X64-AVX-NEXT: retq
 %1 = mul <4 x i32> %a0, <i32 5, i32 17, i32 33, i32 65>
 ret <4 x i32> %1
@@ -1384,7 +1384,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_15_63:
 ; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [15,63]
 ; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 15, i64 63>
 ret <2 x i64> %1
@@ -1427,7 +1427,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
 ; X64-SSE2-NEXT: psrlq $32, %xmm3
 ; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
 ; X64-SSE2-NEXT: paddq %xmm3, %xmm0
 ; X64-SSE2-NEXT: psllq $32, %xmm0
 ; X64-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1441,7 +1441,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X64-SSE4-NEXT: movdqa %xmm0, %xmm3
 ; X64-SSE4-NEXT: psrlq $32, %xmm3
 ; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
 ; X64-SSE4-NEXT: paddq %xmm3, %xmm0
 ; X64-SSE4-NEXT: psllq $32, %xmm0
 ; X64-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1453,7 +1453,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
 ; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
 ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295]
 ; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
 ; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
 ; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1465,7 +1465,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
 ; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
 ; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295]
 ; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
 ; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1473,7 +1473,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_neg_15_63:
 ; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [18446744073709551601,18446744073709551553]
 ; X64-AVX512DQ-NEXT: retq
 %1 = mul <2 x i64> %a0, <i64 -15, i64 -63>
 ret <2 x i64> %1
@@ -1516,7 +1516,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X64-SSE2-NEXT: movdqa %xmm0, %xmm3
 ; X64-SSE2-NEXT: psrlq $32, %xmm3
 ; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
 ; X64-SSE2-NEXT: paddq %xmm3, %xmm0
 ; X64-SSE2-NEXT: psllq $32, %xmm0
 ; X64-SSE2-NEXT: paddq %xmm2, %xmm0
@@ -1530,7 +1530,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X64-SSE4-NEXT: movdqa %xmm0, %xmm3
 ; X64-SSE4-NEXT: psrlq $32, %xmm3
 ; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3
-; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4294967295,4294967295]
 ; X64-SSE4-NEXT: paddq %xmm3, %xmm0
 ; X64-SSE4-NEXT: psllq $32, %xmm0
 ; X64-SSE4-NEXT: paddq %xmm2, %xmm0
@@ -1542,7 +1542,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
 ; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3
 ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295]
 ; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
 ; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0
 ; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
@@ -1554,7 +1554,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X64-AVX2-NEXT:
vpmuludq %xmm1, %xmm0, %xmm2 ; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3 ; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4294967295,4294967295] ; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0 @@ -1562,7 +1562,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind { ; ; X64-AVX512DQ-LABEL: mul_v2i64_neg_17_65: ; X64-AVX512DQ: # %bb.0: -; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [18446744073709551599,18446744073709551551] ; X64-AVX512DQ-NEXT: retq %1 = mul <2 x i64> %a0, <i64 -17, i64 -65> ret <2 x i64> %1 @@ -1600,7 +1600,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind { ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3 ; X86-SSE2-NEXT: psrlq $32, %xmm3 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm3 -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u] ; X86-SSE2-NEXT: paddq %xmm3, %xmm0 ; X86-SSE2-NEXT: psllq $32, %xmm0 ; X86-SSE2-NEXT: paddq %xmm2, %xmm0 @@ -1614,7 +1614,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind { ; X86-SSE4-NEXT: movdqa %xmm0, %xmm3 ; X86-SSE4-NEXT: psrlq $32, %xmm3 ; X86-SSE4-NEXT: pmuludq %xmm1, %xmm3 -; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u] ; X86-SSE4-NEXT: paddq %xmm3, %xmm0 ; X86-SSE4-NEXT: psllq $32, %xmm0 ; X86-SSE4-NEXT: paddq %xmm2, %xmm0 @@ -1628,7 +1628,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind { ; X64-SSE2-NEXT: movdqa %xmm0, %xmm3 ; X64-SSE2-NEXT: psrlq $32, %xmm3 ; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3 -; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-SSE2-NEXT: paddq %xmm3, %xmm0 ; X64-SSE2-NEXT: psllq $32, %xmm0 ; X64-SSE2-NEXT: paddq %xmm2, %xmm0 @@ -1642,7 +1642,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind { ; X64-SSE4-NEXT: movdqa %xmm0, %xmm3 ; X64-SSE4-NEXT: psrlq $32, %xmm3 ; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3 -; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-SSE4-NEXT: paddq %xmm3, %xmm0 ; X64-SSE4-NEXT: psllq $32, %xmm0 ; X64-SSE4-NEXT: paddq %xmm2, %xmm0 @@ -1654,7 +1654,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind { ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 ; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3 ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0 ; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0 @@ -1666,7 +1666,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind { ; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 ; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3 ; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX2-NEXT: 
vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0 @@ -1674,7 +1674,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind { ; ; X64-AVX512DQ-LABEL: mul_v2i64_neg_0_1: ; X64-AVX512DQ: # %bb.0: -; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255] ; X64-AVX512DQ-NEXT: retq %1 = mul <2 x i64> %a0, <i64 0, i64 -1> ret <2 x i64> %1 @@ -1689,7 +1689,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind { ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3 ; X86-SSE2-NEXT: psrlq $32, %xmm3 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm3 -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u] ; X86-SSE2-NEXT: paddq %xmm3, %xmm0 ; X86-SSE2-NEXT: psllq $32, %xmm0 ; X86-SSE2-NEXT: paddq %xmm2, %xmm0 @@ -1703,7 +1703,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind { ; X86-SSE4-NEXT: movdqa %xmm0, %xmm3 ; X86-SSE4-NEXT: psrlq $32, %xmm3 ; X86-SSE4-NEXT: pmuludq %xmm1, %xmm3 -; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,0,u,u,u,u,255,255,255,255,u,u,u,u] ; X86-SSE4-NEXT: paddq %xmm3, %xmm0 ; X86-SSE4-NEXT: psllq $32, %xmm0 ; X86-SSE4-NEXT: paddq %xmm2, %xmm0 @@ -1717,7 +1717,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind { ; X64-SSE2-NEXT: movdqa %xmm0, %xmm3 ; X64-SSE2-NEXT: psrlq $32, %xmm3 ; X64-SSE2-NEXT: pmuludq %xmm1, %xmm3 -; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-SSE2-NEXT: paddq %xmm3, %xmm0 ; X64-SSE2-NEXT: psllq $32, %xmm0 ; X64-SSE2-NEXT: paddq %xmm2, %xmm0 @@ -1731,7 +1731,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind { ; X64-SSE4-NEXT: movdqa %xmm0, %xmm3 ; X64-SSE4-NEXT: psrlq $32, %xmm3 ; X64-SSE4-NEXT: pmuludq %xmm1, %xmm3 -; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-SSE4-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-SSE4-NEXT: paddq %xmm3, %xmm0 ; X64-SSE4-NEXT: psllq $32, %xmm0 ; X64-SSE4-NEXT: paddq %xmm2, %xmm0 @@ -1743,7 +1743,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind { ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 ; X64-XOP-NEXT: vpsrlq $32, %xmm0, %xmm3 ; X64-XOP-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-XOP-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; X64-XOP-NEXT: vpsllq $32, %xmm0, %xmm0 ; X64-XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0 @@ -1755,7 +1755,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind { ; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2 ; X64-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3 ; X64-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1 -; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0] ; X64-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpsllq $32, %xmm0, 
%xmm0 ; X64-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0 @@ -1763,7 +1763,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind { ; ; X64-AVX512DQ-LABEL: mul_v2i64_15_neg_63: ; X64-AVX512DQ: # %bb.0: -; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [15,18446744073709551553] ; X64-AVX512DQ-NEXT: retq %1 = mul <2 x i64> %a0, <i64 15, i64 -63> ret <2 x i64> %1 @@ -1773,36 +1773,36 @@ define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind { ; X86-SSE2-LABEL: mul_v4i32_0_15_31_7: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,15,31,7] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [15,u,7,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-SSE2-NEXT: retl ; ; X86-SSE4-LABEL: mul_v4i32_0_15_31_7: ; X86-SSE4: # %bb.0: -; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,15,31,7] ; X86-SSE4-NEXT: retl ; ; X64-SSE2-LABEL: mul_v4i32_0_15_31_7: ; X64-SSE2: # %bb.0: ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,15,31,7] ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [15,u,7,u] ; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X64-SSE2-NEXT: retq ; ; X64-SSE4-LABEL: mul_v4i32_0_15_31_7: ; X64-SSE4: # %bb.0: -; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,15,31,7] ; X64-SSE4-NEXT: retq ; ; X64-AVX-LABEL: mul_v4i32_0_15_31_7: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,15,31,7] ; X64-AVX-NEXT: retq %1 = mul <4 x i32> %a0, <i32 0, i32 15, i32 31, i32 7> ret <4 x i32> %1 @@ -1947,7 +1947,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind { ; ; X64-AVX512DQ-LABEL: mul_v2i64_68_132: ; X64-AVX512DQ: # %bb.0: -; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [68,132] ; X64-AVX512DQ-NEXT: retq %mul = mul <2 x i64> %x, <i64 68, i64 132> ret <2 x i64> %mul @@ -2009,7 +2009,7 @@ define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind { ; ; X64-AVX512DQ-LABEL: mul_v2i64_60_120: ; X64-AVX512DQ: # %bb.0: -; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; X64-AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [60,124] ; X64-AVX512DQ-NEXT: retq %mul = mul <2 x i64> %x, <i64 60, i64 124> ret <2 x i64> %mul diff --git a/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll b/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll index 983ae59..3d85d55 100644 --- a/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll +++ b/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll @@ -851,7 +851,7 @@ define 
i16 @test_v4i16_v4i8(<4 x i16> %a0) { ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535] ; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,128,0,64,0,32,u,u,u,u,u,u,u,u] ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] ; SSE2-NEXT: paddw %xmm0, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll index 93f4ce7..0bf5a8d 100644 --- a/llvm/test/CodeGen/X86/vector-rotate-128.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll @@ -1092,9 +1092,9 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind { ; SSE2-LABEL: constant_rotate_v4i32: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] @@ -1106,8 +1106,8 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind { ; SSE41-LABEL: constant_rotate_v4i32: ; SSE41: # %bb.0: ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u] +; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1118,8 +1118,8 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind { ; AVX1-LABEL: constant_rotate_v4i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2] @@ -1156,9 +1156,9 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind { ; X86-SSE2-LABEL: constant_rotate_v4i32: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] -; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,128,u] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll index 64c3118..5ae3e2f 100644 --- 
a/llvm/test/CodeGen/X86/vector-rotate-256.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll @@ -895,13 +895,13 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind { ; AVX1-LABEL: constant_rotate_v8i32: ; AVX1: # %bb.0: ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [32,u,128,u] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [512,u,2048,u] ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [256,512,1024,2048] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7] ; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll index d565ef0..1602cde 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll @@ -1673,7 +1673,7 @@ define <16 x i8> @constant_shift_v16i8_pairs(<16 x i8> %a) nounwind { ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,65535,65535] ; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,128,0,16,0,2,0,32,0,64,0,0,0,8,0,4] ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [64,64,8,8,1,1,16,16,32,32,128,128,4,4,2,2] @@ -1750,7 +1750,7 @@ define <16 x i8> @constant_shift_v16i8_pairs(<16 x i8> %a) nounwind { ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,65535,65535] ; X86-SSE-NEXT: pandn %xmm0, %xmm1 -; X86-SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,128,0,16,0,2,0,32,0,64,0,0,0,8,0,4] ; X86-SSE-NEXT: por %xmm1, %xmm0 ; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [64,64,8,8,1,1,16,16,32,32,128,128,4,4,2,2] diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll index 8cb2c7b..a847da6 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll @@ -1223,7 +1223,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind { ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535] ; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,128,0,64,0,32,0,16,0,8,0,4,0,2] ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -1275,7 +1275,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind { ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535] ; X86-SSE-NEXT: pandn %xmm0, %xmm1 -; X86-SSE-NEXT: pmulhuw 
{{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,128,0,64,0,32,0,16,0,8,0,4,0,2] ; X86-SSE-NEXT: por %xmm1, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7> diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll index 57874c4..eb39b6a 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll @@ -1480,7 +1480,7 @@ define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind { ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535] ; SSE2-NEXT: pandn %xmm0, %xmm1 -; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,0,128,0,64,0,32,u,u,u,u,u,u,u,u] ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -1532,7 +1532,7 @@ define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind { ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535] ; X86-SSE-NEXT: pandn %xmm0, %xmm1 -; X86-SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,0,128,0,64,0,32,u,u,u,u,u,u,u,u] ; X86-SSE-NEXT: por %xmm1, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <4 x i16> %a, <i16 0, i16 1, i16 2, i16 3> diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 99dac74..3085c32 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -987,21 +987,21 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind { ; SSE2-LABEL: constant_shift_v4i32: ; SSE2: # %bb.0: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [32,u,128,u] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: retq ; ; SSE41-LABEL: constant_shift_v4i32: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16,32,64,128] ; SSE41-NEXT: retq ; ; AVX1-LABEL: constant_shift_v4i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16,32,64,128] ; AVX1-NEXT: retq ; ; AVX2-LABEL: constant_shift_v4i32: @@ -1032,9 +1032,9 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind { ; X86-SSE-LABEL: constant_shift_v4i32: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16,32,64,128] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [32,u,128,u] ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-SSE-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll 
b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll index b56a8b5..f9ccd1e 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll @@ -1117,9 +1117,9 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind { define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind { ; AVX1-LABEL: constant_shift_v8i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [16,32,64,128] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [256,512,256,128] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; @@ -1153,9 +1153,9 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind { ; ; X86-AVX1-LABEL: constant_shift_v8i32: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 # [16,32,64,128] ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [256,512,256,128] ; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X86-AVX1-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll index 0e20b18..18d79b6 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll @@ -778,9 +778,9 @@ define <16 x i8> @combine_shl_pshufb(<4 x i32> %a0) { ; SSSE3-LABEL: combine_shl_pshufb: ; SSSE3: # %bb.0: ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] -; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,256,65536,65536] ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSSE3-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [256,u,65536,u] ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,2,3,0,5,6,7,4,9,10,11,8,12,13,14,15] @@ -788,13 +788,13 @@ define <16 x i8> @combine_shl_pshufb(<4 x i32> %a0) { ; ; SSE41-LABEL: combine_shl_pshufb: ; SSE41: # %bb.0: -; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,256,65536,65536] ; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,2,3,0,5,6,7,4,9,10,11,8,12,13,14,15] ; SSE41-NEXT: retq ; ; AVX1-LABEL: combine_shl_pshufb: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,256,65536,65536] ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,2,3,0,5,6,7,4,9,10,11,8,12,13,14,15] ; AVX1-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll index 1af7542..4235377 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-math.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll @@ -2110,7 +2110,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind { ; SSE: # %bb.0: ; SSE-NEXT: xorps %xmm2, %xmm2 ; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE-NEXT: 
pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2,3] ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2] ; SSE-NEXT: movaps %xmm2, %xmm0 ; SSE-NEXT: retq @@ -2119,7 +2119,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3] ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -2127,7 +2127,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind { ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX2-SLOW-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX2-SLOW-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3] ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; @@ -2135,7 +2135,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind { ; AVX2-FAST-ALL: # %bb.0: ; AVX2-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,4,6,0,0,0,0] ; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0 -; AVX2-FAST-ALL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX2-FAST-ALL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3] ; AVX2-FAST-ALL-NEXT: vzeroupper ; AVX2-FAST-ALL-NEXT: retq ; @@ -2143,7 +2143,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind { ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX2-FAST-PERLANE-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX2-FAST-PERLANE-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3] ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; @@ -2151,7 +2151,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind { ; AVX512: # %bb.0: ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vpmovqd %zmm0, %ymm0 -; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,2,3] ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = mul <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3> @@ -2253,13 +2253,13 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind { define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; SSE-LABEL: trunc_mul_const_v16i64_v16i8: ; SSE: # %bb.0: -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5 -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6 -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2,3] +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [4,5] +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [6,7] +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [8,9] +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5 # [10,11] +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6 # [12,13] +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7 # [14,15] ; SSE-NEXT: movdqa {{.*#+}} xmm8 = 
[255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] ; SSE-NEXT: pand %xmm8, %xmm7 ; SSE-NEXT: pand %xmm8, %xmm6 @@ -2280,18 +2280,18 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; ; AVX1-LABEL: trunc_mul_const_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm4 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm4 # [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm5 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2,3] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm5 # [4,5] ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm6 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [6,7] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm6 # [8,9] ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm7 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [10,11] +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm7 # [12,13] ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 -; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [14,15] ; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm8 = [255,255] ; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3 ; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7 @@ -2313,10 +2313,10 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; ; AVX2-LABEL: trunc_mul_const_v16i64_v16i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,1,2,3] +; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [4,5,6,7] +; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [8,9,10,11] +; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [12,13,14,15] ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] ; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2 @@ -2335,8 +2335,8 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; ; AVX512F-LABEL: trunc_mul_const_v16i64_v16i8: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7] +; AVX512F-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [8,9,10,11,12,13,14,15] ; AVX512F-NEXT: vpmovqb %zmm1, %xmm1 ; AVX512F-NEXT: vpmovqb %zmm0, %xmm0 ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] @@ -2345,8 +2345,8 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; ; AVX512BW-LABEL: trunc_mul_const_v16i64_v16i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), 
%zmm0, %zmm0 -; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7] +; AVX512BW-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [8,9,10,11,12,13,14,15] ; AVX512BW-NEXT: vpmovqb %zmm1, %xmm1 ; AVX512BW-NEXT: vpmovqb %zmm0, %xmm0 ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] @@ -2355,8 +2355,8 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind { ; ; AVX512DQ-LABEL: trunc_mul_const_v16i64_v16i8: ; AVX512DQ: # %bb.0: -; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 -; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7] +; AVX512DQ-NEXT: vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [8,9,10,11,12,13,14,15] ; AVX512DQ-NEXT: vpmovqb %zmm1, %xmm1 ; AVX512DQ-NEXT: vpmovqb %zmm0, %xmm0 ; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] @@ -2371,27 +2371,27 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind { ; SSE-LABEL: trunc_mul_const_v16i32_v16i8: ; SSE: # %bb.0: ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,1,2,3] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [1,u,3,u] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [5,u,7,u] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [8,9,10,11] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [9,u,11,u] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [12,13,14,15] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] -; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 +; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [13,u,15,u] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] @@ -2406,12 +2406,12 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind { ; ; AVX1-LABEL: trunc_mul_const_v16i32_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [0,1,2,3] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; 
AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,5,6,7] +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3 # [8,9,10,11] ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [12,13,14,15] ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 @@ -2425,8 +2425,8 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind { ; ; AVX2-LABEL: trunc_mul_const_v16i32_v16i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,1,2,3,4,5,6,7] +; AVX2-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [8,9,10,11,12,13,14,15] ; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] ; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 @@ -2439,7 +2439,7 @@ define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind { ; ; AVX512-LABEL: trunc_mul_const_v16i32_v16i8: ; AVX512: # %bb.0: -; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] ; AVX512-NEXT: vpmovdb %zmm0, %xmm0 ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll index 17315c4..1c5be03 100644 --- a/llvm/test/CodeGen/X86/vselect-avx.ll +++ b/llvm/test/CodeGen/X86/vselect-avx.ll @@ -95,7 +95,7 @@ bb: define void @test3(<4 x i32> %induction30, ptr %tmp16, ptr %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) { ; AVX1-LABEL: test3: ; AVX1: ## %bb.0: -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ## [2863311531,2863311531,2863311531,2863311531] ; AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3 ; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vselect-pcmp.ll b/llvm/test/CodeGen/X86/vselect-pcmp.ll index 8543e9f..16700d4 100644 --- a/llvm/test/CodeGen/X86/vselect-pcmp.ll +++ b/llvm/test/CodeGen/X86/vselect-pcmp.ll @@ -1046,7 +1046,7 @@ define <2 x i64> @blend_mask_cond_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %z define <4 x i32> @blend_mask_cond_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) { ; AVX1-LABEL: blend_mask_cond_v4i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [32768,4194304,1073741824,2147483648] ; AVX1-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -1211,9 +1211,9 @@ define <4 x i64> @blend_mask_cond_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %z define <8 x i32> @blend_mask_cond_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) { ; AVX1-LABEL: blend_mask_cond_v8i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3 # 
[2147483648,1073741824,268435456,536870912] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [268435456,2097152,1073741824,524288] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 ; AVX1-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0 ; AVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll index b2064b1..02d4d88 100644 --- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll +++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll @@ -181,40 +181,38 @@ define zeroext i1 @segmentedStack(ptr readonly %vk1, ptr readonly %vk2, i64 %key ; CHECK-LABEL: segmentedStack: ; CHECK: ## %bb.0: ; CHECK-NEXT: cmpq %gs:816, %rsp -; CHECK-NEXT: jbe LBB3_6 +; CHECK-NEXT: jbe LBB3_7 ; CHECK-NEXT: LBB3_1: ## %entry ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: testq %rdi, %rdi -; CHECK-NEXT: sete %al -; CHECK-NEXT: testq %rsi, %rsi -; CHECK-NEXT: sete %cl -; CHECK-NEXT: orb %al, %cl ; CHECK-NEXT: movq %rdi, %rax ; CHECK-NEXT: orq %rsi, %rax ; CHECK-NEXT: sete %al -; CHECK-NEXT: testb %cl, %cl -; CHECK-NEXT: jne LBB3_4 -; CHECK-NEXT: ## %bb.2: ## %if.end4.i +; CHECK-NEXT: testq %rdi, %rdi +; CHECK-NEXT: je LBB3_5 +; CHECK-NEXT: ## %bb.2: ## %entry +; CHECK-NEXT: testq %rsi, %rsi +; CHECK-NEXT: je LBB3_5 +; CHECK-NEXT: ## %bb.3: ## %if.end4.i ; CHECK-NEXT: movq 8(%rdi), %rdx ; CHECK-NEXT: cmpq 8(%rsi), %rdx -; CHECK-NEXT: jne LBB3_5 -; CHECK-NEXT: ## %bb.3: ## %land.rhs.i.i +; CHECK-NEXT: jne LBB3_6 +; CHECK-NEXT: ## %bb.4: ## %land.rhs.i.i ; CHECK-NEXT: movq (%rsi), %rsi ; CHECK-NEXT: movq (%rdi), %rdi ; CHECK-NEXT: callq _memcmp ; CHECK-NEXT: testl %eax, %eax ; CHECK-NEXT: sete %al -; CHECK-NEXT: LBB3_4: ## %__go_ptr_strings_equal.exit +; CHECK-NEXT: LBB3_5: ## %__go_ptr_strings_equal.exit ; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: popq %rcx ; CHECK-NEXT: retq -; CHECK-NEXT: LBB3_5: +; CHECK-NEXT: LBB3_6: ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: ## kill: def $al killed $al killed $eax ; CHECK-NEXT: popq %rcx ; CHECK-NEXT: retq -; CHECK-NEXT: LBB3_6: +; CHECK-NEXT: LBB3_7: ; CHECK-NEXT: movl $8, %r10d ; CHECK-NEXT: movl $0, %r11d ; CHECK-NEXT: callq ___morestack @@ -224,43 +222,41 @@ define zeroext i1 @segmentedStack(ptr readonly %vk1, ptr readonly %vk2, i64 %key ; NOCOMPACTUNWIND-LABEL: segmentedStack: ; NOCOMPACTUNWIND: # %bb.0: ; NOCOMPACTUNWIND-NEXT: cmpq %fs:112, %rsp -; NOCOMPACTUNWIND-NEXT: jbe .LBB3_6 +; NOCOMPACTUNWIND-NEXT: jbe .LBB3_7 ; NOCOMPACTUNWIND-NEXT: .LBB3_1: # %entry ; NOCOMPACTUNWIND-NEXT: pushq %rax ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16 -; NOCOMPACTUNWIND-NEXT: testq %rdi, %rdi -; NOCOMPACTUNWIND-NEXT: sete %al -; NOCOMPACTUNWIND-NEXT: testq %rsi, %rsi -; NOCOMPACTUNWIND-NEXT: sete %cl -; NOCOMPACTUNWIND-NEXT: orb %al, %cl ; NOCOMPACTUNWIND-NEXT: movq %rdi, %rax ; NOCOMPACTUNWIND-NEXT: orq %rsi, %rax ; NOCOMPACTUNWIND-NEXT: sete %al -; NOCOMPACTUNWIND-NEXT: testb %cl, %cl -; NOCOMPACTUNWIND-NEXT: jne .LBB3_4 -; NOCOMPACTUNWIND-NEXT: # %bb.2: # %if.end4.i +; NOCOMPACTUNWIND-NEXT: testq %rdi, %rdi +; NOCOMPACTUNWIND-NEXT: je .LBB3_5 +; NOCOMPACTUNWIND-NEXT: # %bb.2: # %entry +; NOCOMPACTUNWIND-NEXT: testq %rsi, %rsi +; NOCOMPACTUNWIND-NEXT: je .LBB3_5 +; NOCOMPACTUNWIND-NEXT: # %bb.3: # %if.end4.i ; NOCOMPACTUNWIND-NEXT: movq 8(%rdi), %rdx ; NOCOMPACTUNWIND-NEXT: cmpq 8(%rsi), %rdx -; NOCOMPACTUNWIND-NEXT: jne .LBB3_5 -; 
NOCOMPACTUNWIND-NEXT: # %bb.3: # %land.rhs.i.i +; NOCOMPACTUNWIND-NEXT: jne .LBB3_6 +; NOCOMPACTUNWIND-NEXT: # %bb.4: # %land.rhs.i.i ; NOCOMPACTUNWIND-NEXT: movq (%rsi), %rsi ; NOCOMPACTUNWIND-NEXT: movq (%rdi), %rdi ; NOCOMPACTUNWIND-NEXT: callq memcmp@PLT ; NOCOMPACTUNWIND-NEXT: testl %eax, %eax ; NOCOMPACTUNWIND-NEXT: sete %al -; NOCOMPACTUNWIND-NEXT: .LBB3_4: # %__go_ptr_strings_equal.exit +; NOCOMPACTUNWIND-NEXT: .LBB3_5: # %__go_ptr_strings_equal.exit ; NOCOMPACTUNWIND-NEXT: # kill: def $al killed $al killed $eax ; NOCOMPACTUNWIND-NEXT: popq %rcx ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8 ; NOCOMPACTUNWIND-NEXT: retq -; NOCOMPACTUNWIND-NEXT: .LBB3_5: +; NOCOMPACTUNWIND-NEXT: .LBB3_6: ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16 ; NOCOMPACTUNWIND-NEXT: xorl %eax, %eax ; NOCOMPACTUNWIND-NEXT: # kill: def $al killed $al killed $eax ; NOCOMPACTUNWIND-NEXT: popq %rcx ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8 ; NOCOMPACTUNWIND-NEXT: retq -; NOCOMPACTUNWIND-NEXT: .LBB3_6: +; NOCOMPACTUNWIND-NEXT: .LBB3_7: ; NOCOMPACTUNWIND-NEXT: movl $8, %r10d ; NOCOMPACTUNWIND-NEXT: movl $0, %r11d ; NOCOMPACTUNWIND-NEXT: callq __morestack |